author		David S. Miller <davem@davemloft.net>	2008-12-28 23:19:47 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-28 23:19:47 -0500
commit		e3c6d4ee545e427b55882d97d3b663c6411645fe (patch)
tree		294326663fb757739a98083c2ddd570d1eaf7337 /kernel
parent		5bc053089376217943187ed5153d0d1e5c5085b6 (diff)
parent		3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	arch/sparc64/kernel/idprom.c
Diffstat (limited to 'kernel')
68 files changed, 6632 insertions(+), 2023 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 19fad003b19d..027edda63511 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
-	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o
+	    notifier.o ksysfs.o pm_qos_params.o sched_clock.o cred.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
@@ -19,7 +19,6 @@ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 CFLAGS_REMOVE_cgroup-debug.o = -pg
 CFLAGS_REMOVE_sched_clock.o = -pg
-CFLAGS_REMOVE_sched.o = -pg
 endif
 
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -90,7 +89,7 @@ obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 
-ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
+ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
 # needed for x86 only. Why this used to be enabled for all architectures is beyond
 # me. I suspect most platforms don't need this, but until we know that for sure
diff --git a/kernel/acct.c b/kernel/acct.c
index f6006a60df5d..d57b7cbb98b6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -530,15 +530,14 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 	do_div(elapsed, AHZ);
 	ac.ac_btime = get_seconds() - elapsed;
 	/* we really need to bite the bullet and change layout */
-	ac.ac_uid = current->uid;
-	ac.ac_gid = current->gid;
+	current_uid_gid(&ac.ac_uid, &ac.ac_gid);
 #if ACCT_VERSION==2
 	ac.ac_ahz = AHZ;
 #endif
 #if ACCT_VERSION==1 || ACCT_VERSION==2
 	/* backward-compatible 16 bit fields */
-	ac.ac_uid16 = current->uid;
-	ac.ac_gid16 = current->gid;
+	ac.ac_uid16 = ac.ac_uid;
+	ac.ac_gid16 = ac.ac_gid;
 #endif
 #if ACCT_VERSION==3
 	ac.ac_pid = task_tgid_nr_ns(current, ns);
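
The acct.c hunk above is the shape almost every file in this merge takes: direct reads of current->uid and current->gid are replaced by the credential accessors from include/linux/cred.h, so both IDs come from one snapshot of the task's RCU-managed cred struct. A minimal caller-side sketch of the pattern, using only accessors that appear in this diff (the struct and field names below are schematic stand-ins, not the real BSD accounting layout):

#include <linux/cred.h>

/* Schematic stand-in for a record being stamped with ownership. */
struct owner_ids {
	uid_t uid;
	gid_t gid;
};

static void fill_owner_ids(struct owner_ids *ids)
{
	/*
	 * Pre-cred style:  ids->uid = current->uid;
	 *                  ids->gid = current->gid;
	 * Cred style: read both IDs through current's credentials in
	 * one call, so they come from the same cred snapshot.
	 */
	current_uid_gid(&ids->uid, &ids->gid);
}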
diff --git a/kernel/audit.c b/kernel/audit.c
index 4414e93d8750..ce6d8ea3131e 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -61,8 +61,11 @@
 
 #include "audit.h"
 
-/* No auditing will take place until audit_initialized != 0.
+/* No auditing will take place until audit_initialized == AUDIT_INITIALIZED.
  * (Initialization happens after skb_init is called.) */
+#define AUDIT_DISABLED		-1
+#define AUDIT_UNINITIALIZED	0
+#define AUDIT_INITIALIZED	1
 static int audit_initialized;
 
 #define AUDIT_OFF	0
@@ -965,6 +968,9 @@ static int __init audit_init(void)
 {
 	int i;
 
+	if (audit_initialized == AUDIT_DISABLED)
+		return 0;
+
 	printk(KERN_INFO "audit: initializing netlink socket (%s)\n",
 	       audit_default ? "enabled" : "disabled");
 	audit_sock = netlink_kernel_create(&init_net, NETLINK_AUDIT, 0,
@@ -976,7 +982,7 @@ static int __init audit_init(void)
 
 	skb_queue_head_init(&audit_skb_queue);
 	skb_queue_head_init(&audit_skb_hold_queue);
-	audit_initialized = 1;
+	audit_initialized = AUDIT_INITIALIZED;
 	audit_enabled = audit_default;
 	audit_ever_enabled |= !!audit_default;
 
@@ -999,13 +1005,21 @@ __initcall(audit_init);
 static int __init audit_enable(char *str)
 {
 	audit_default = !!simple_strtol(str, NULL, 0);
-	printk(KERN_INFO "audit: %s%s\n",
-	       audit_default ? "enabled" : "disabled",
-	       audit_initialized ? "" : " (after initialization)");
-	if (audit_initialized) {
+	if (!audit_default)
+		audit_initialized = AUDIT_DISABLED;
+
+	printk(KERN_INFO "audit: %s", audit_default ? "enabled" : "disabled");
+
+	if (audit_initialized == AUDIT_INITIALIZED) {
 		audit_enabled = audit_default;
 		audit_ever_enabled |= !!audit_default;
+	} else if (audit_initialized == AUDIT_UNINITIALIZED) {
+		printk(" (after initialization)");
+	} else {
+		printk(" (until reboot)");
 	}
+	printk("\n");
+
 	return 1;
 }
 
@@ -1107,9 +1121,7 @@ unsigned int audit_serial(void)
 static inline void audit_get_stamp(struct audit_context *ctx,
 				   struct timespec *t, unsigned int *serial)
 {
-	if (ctx)
-		auditsc_get_stamp(ctx, t, serial);
-	else {
+	if (!ctx || !auditsc_get_stamp(ctx, t, serial)) {
 		*t = CURRENT_TIME;
 		*serial = audit_serial();
 	}
@@ -1146,7 +1158,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	int reserve;
 	unsigned long timeout_start = jiffies;
 
-	if (!audit_initialized)
+	if (audit_initialized != AUDIT_INITIALIZED)
 		return NULL;
 
 	if (unlikely(audit_filter_type(type)))
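
The audit.c changes turn audit_initialized into a tri-state: AUDIT_DISABLED (-1, set when audit=0 is parsed off the kernel command line), AUDIT_UNINITIALIZED (0) and AUDIT_INITIALIZED (1). Because __setup() handlers run before the initcall, audit_init() can now skip creating the netlink socket entirely. A standalone sketch of the same guard pattern (all names here are hypothetical):

#include <stdio.h>

enum init_state { ST_DISABLED = -1, ST_UNINITIALIZED = 0, ST_INITIALIZED = 1 };

static enum init_state state = ST_UNINITIALIZED;

/* stands in for the boot-parameter handler, which runs before init */
static void handle_boot_param(int enabled)
{
	if (!enabled)
		state = ST_DISABLED;	/* makes the init below a no-op */
}

/* stands in for audit_init(): skip all resource allocation if disabled */
static void subsystem_init(void)
{
	if (state == ST_DISABLED)
		return;
	/* ... create sockets, queues ... */
	state = ST_INITIALIZED;
}

/* stands in for audit_log_start(): only a fully-initialized subsystem logs */
static int can_log(void)
{
	return state == ST_INITIALIZED;
}

int main(void)
{
	handle_boot_param(0);			/* audit=0 */
	subsystem_init();
	printf("can_log=%d\n", can_log());	/* prints can_log=0 */
	return 0;
}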
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index cf5bc2f5f9c3..4819f3711973 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -65,6 +65,7 @@
 #include <linux/highmem.h>
 #include <linux/syscalls.h>
 #include <linux/inotify.h>
+#include <linux/capability.h>
 
 #include "audit.h"
 
@@ -84,6 +85,15 @@ int audit_n_rules;
 /* determines whether we collect data for signals sent */
 int audit_signals;
 
+struct audit_cap_data {
+	kernel_cap_t		permitted;
+	kernel_cap_t		inheritable;
+	union {
+		unsigned int	fE;		/* effective bit of a file capability */
+		kernel_cap_t	effective;	/* effective set of a process */
+	};
+};
+
 /* When fs/namei.c:getname() is called, we store the pointer in name and
  * we don't let putname() free it (instead we free all of the saved
  * pointers at syscall exit time).
@@ -100,6 +110,8 @@ struct audit_names {
 	gid_t		gid;
 	dev_t		rdev;
 	u32		osid;
+	struct audit_cap_data fcap;
+	unsigned int	fcap_ver;
 };
 
 struct audit_aux_data {
@@ -184,6 +196,20 @@ struct audit_aux_data_pids {
 	int		pid_count;
 };
 
+struct audit_aux_data_bprm_fcaps {
+	struct audit_aux_data	d;
+	struct audit_cap_data	fcap;
+	unsigned int		fcap_ver;
+	struct audit_cap_data	old_pcap;
+	struct audit_cap_data	new_pcap;
+};
+
+struct audit_aux_data_capset {
+	struct audit_aux_data	d;
+	pid_t			pid;
+	struct audit_cap_data	cap;
+};
+
 struct audit_tree_refs {
 	struct audit_tree_refs *next;
 	struct audit_chunk *c[31];
@@ -421,6 +447,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 			      struct audit_names *name,
 			      enum audit_state *state)
 {
+	const struct cred *cred = get_task_cred(tsk);
 	int i, j, need_sid = 1;
 	u32 sid;
 
@@ -440,28 +467,28 @@ static int audit_filter_rules(struct task_struct *tsk,
 			}
 			break;
 		case AUDIT_UID:
-			result = audit_comparator(tsk->uid, f->op, f->val);
+			result = audit_comparator(cred->uid, f->op, f->val);
 			break;
 		case AUDIT_EUID:
-			result = audit_comparator(tsk->euid, f->op, f->val);
+			result = audit_comparator(cred->euid, f->op, f->val);
 			break;
 		case AUDIT_SUID:
-			result = audit_comparator(tsk->suid, f->op, f->val);
+			result = audit_comparator(cred->suid, f->op, f->val);
 			break;
 		case AUDIT_FSUID:
-			result = audit_comparator(tsk->fsuid, f->op, f->val);
+			result = audit_comparator(cred->fsuid, f->op, f->val);
 			break;
 		case AUDIT_GID:
-			result = audit_comparator(tsk->gid, f->op, f->val);
+			result = audit_comparator(cred->gid, f->op, f->val);
 			break;
 		case AUDIT_EGID:
-			result = audit_comparator(tsk->egid, f->op, f->val);
+			result = audit_comparator(cred->egid, f->op, f->val);
 			break;
 		case AUDIT_SGID:
-			result = audit_comparator(tsk->sgid, f->op, f->val);
+			result = audit_comparator(cred->sgid, f->op, f->val);
 			break;
 		case AUDIT_FSGID:
-			result = audit_comparator(tsk->fsgid, f->op, f->val);
+			result = audit_comparator(cred->fsgid, f->op, f->val);
 			break;
 		case AUDIT_PERS:
 			result = audit_comparator(tsk->personality, f->op, f->val);
@@ -615,8 +642,10 @@ static int audit_filter_rules(struct task_struct *tsk,
 			break;
 		}
 
-		if (!result)
+		if (!result) {
+			put_cred(cred);
 			return 0;
+		}
 	}
 	if (rule->filterkey && ctx)
 		ctx->filterkey = kstrdup(rule->filterkey, GFP_ATOMIC);
@@ -624,6 +653,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 	case AUDIT_NEVER:    *state = AUDIT_DISABLED;	    break;
 	case AUDIT_ALWAYS:   *state = AUDIT_RECORD_CONTEXT; break;
 	}
+	put_cred(cred);
 	return 1;
 }
 
@@ -1171,8 +1201,38 @@ static void audit_log_execve_info(struct audit_context *context,
 	kfree(buf);
 }
 
+static void audit_log_cap(struct audit_buffer *ab, char *prefix, kernel_cap_t *cap)
+{
+	int i;
+
+	audit_log_format(ab, " %s=", prefix);
+	CAP_FOR_EACH_U32(i) {
+		audit_log_format(ab, "%08x", cap->cap[(_KERNEL_CAPABILITY_U32S-1) - i]);
+	}
+}
+
+static void audit_log_fcaps(struct audit_buffer *ab, struct audit_names *name)
+{
+	kernel_cap_t *perm = &name->fcap.permitted;
+	kernel_cap_t *inh = &name->fcap.inheritable;
+	int log = 0;
+
+	if (!cap_isclear(*perm)) {
+		audit_log_cap(ab, "cap_fp", perm);
+		log = 1;
+	}
+	if (!cap_isclear(*inh)) {
+		audit_log_cap(ab, "cap_fi", inh);
+		log = 1;
+	}
+
+	if (log)
+		audit_log_format(ab, " cap_fe=%d cap_fver=%x", name->fcap.fE, name->fcap_ver);
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
+	const struct cred *cred;
 	int i, call_panic = 0;
 	struct audit_buffer *ab;
 	struct audit_aux_data *aux;
@@ -1182,14 +1242,15 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 	context->pid = tsk->pid;
 	if (!context->ppid)
 		context->ppid = sys_getppid();
-	context->uid = tsk->uid;
-	context->gid = tsk->gid;
-	context->euid = tsk->euid;
-	context->suid = tsk->suid;
-	context->fsuid = tsk->fsuid;
-	context->egid = tsk->egid;
-	context->sgid = tsk->sgid;
-	context->fsgid = tsk->fsgid;
+	cred = current_cred();
+	context->uid = cred->uid;
+	context->gid = cred->gid;
+	context->euid = cred->euid;
+	context->suid = cred->suid;
+	context->fsuid = cred->fsuid;
+	context->egid = cred->egid;
+	context->sgid = cred->sgid;
+	context->fsgid = cred->fsgid;
 	context->personality = tsk->personality;
 
 	ab = audit_log_start(context, GFP_KERNEL, AUDIT_SYSCALL);
@@ -1334,6 +1395,28 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 		audit_log_format(ab, "fd0=%d fd1=%d", axs->fd[0], axs->fd[1]);
 		break; }
 
+	case AUDIT_BPRM_FCAPS: {
+		struct audit_aux_data_bprm_fcaps *axs = (void *)aux;
+		audit_log_format(ab, "fver=%x", axs->fcap_ver);
+		audit_log_cap(ab, "fp", &axs->fcap.permitted);
+		audit_log_cap(ab, "fi", &axs->fcap.inheritable);
+		audit_log_format(ab, " fe=%d", axs->fcap.fE);
+		audit_log_cap(ab, "old_pp", &axs->old_pcap.permitted);
+		audit_log_cap(ab, "old_pi", &axs->old_pcap.inheritable);
+		audit_log_cap(ab, "old_pe", &axs->old_pcap.effective);
+		audit_log_cap(ab, "new_pp", &axs->new_pcap.permitted);
+		audit_log_cap(ab, "new_pi", &axs->new_pcap.inheritable);
+		audit_log_cap(ab, "new_pe", &axs->new_pcap.effective);
+		break; }
+
+	case AUDIT_CAPSET: {
+		struct audit_aux_data_capset *axs = (void *)aux;
+		audit_log_format(ab, "pid=%d", axs->pid);
+		audit_log_cap(ab, "cap_pi", &axs->cap.inheritable);
+		audit_log_cap(ab, "cap_pp", &axs->cap.permitted);
+		audit_log_cap(ab, "cap_pe", &axs->cap.effective);
+		break; }
+
 	}
 	audit_log_end(ab);
 }
@@ -1421,6 +1504,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 		}
 	}
 
+	audit_log_fcaps(ab, n);
+
 	audit_log_end(ab);
 }
 
@@ -1459,7 +1544,6 @@ void audit_free(struct task_struct *tsk)
 
 /**
  * audit_syscall_entry - fill in an audit record at syscall entry
- * @tsk: task being audited
  * @arch: architecture type
  * @major: major syscall type (function)
  * @a1: additional syscall register 1
@@ -1548,9 +1632,25 @@ void audit_syscall_entry(int arch, int major,
 		context->ppid = 0;
 }
 
+void audit_finish_fork(struct task_struct *child)
+{
+	struct audit_context *ctx = current->audit_context;
+	struct audit_context *p = child->audit_context;
+	if (!p || !ctx || !ctx->auditable)
+		return;
+	p->arch = ctx->arch;
+	p->major = ctx->major;
+	memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
+	p->ctime = ctx->ctime;
+	p->dummy = ctx->dummy;
+	p->auditable = ctx->auditable;
+	p->in_syscall = ctx->in_syscall;
+	p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
+	p->ppid = current->pid;
+}
+
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @tsk: task being audited
  * @valid: success/failure flag
  * @return_code: syscall return value
  *
@@ -1787,8 +1887,36 @@ static int audit_inc_name_count(struct audit_context *context,
 	return 0;
 }
 
+
+static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
+{
+	struct cpu_vfs_cap_data caps;
+	int rc;
+
+	memset(&name->fcap.permitted, 0, sizeof(kernel_cap_t));
+	memset(&name->fcap.inheritable, 0, sizeof(kernel_cap_t));
+	name->fcap.fE = 0;
+	name->fcap_ver = 0;
+
+	if (!dentry)
+		return 0;
+
+	rc = get_vfs_caps_from_disk(dentry, &caps);
+	if (rc)
+		return rc;
+
+	name->fcap.permitted = caps.permitted;
+	name->fcap.inheritable = caps.inheritable;
+	name->fcap.fE = !!(caps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+	name->fcap_ver = (caps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
+
+	return 0;
+}
+
+
 /* Copy inode data into an audit_names. */
-static void audit_copy_inode(struct audit_names *name, const struct inode *inode)
+static void audit_copy_inode(struct audit_names *name, const struct dentry *dentry,
+			     const struct inode *inode)
 {
 	name->ino   = inode->i_ino;
 	name->dev   = inode->i_sb->s_dev;
@@ -1797,6 +1925,7 @@ static void audit_copy_inode(struct audit_names *name, const struct inode *inode)
 	name->gid   = inode->i_gid;
 	name->rdev  = inode->i_rdev;
 	security_inode_getsecid(inode, &name->osid);
+	audit_copy_fcaps(name, dentry);
 }
 
 /**
@@ -1831,7 +1960,7 @@ void __audit_inode(const char *name, const struct dentry *dentry)
 		context->names[idx].name = NULL;
 	}
 	handle_path(dentry);
-	audit_copy_inode(&context->names[idx], inode);
+	audit_copy_inode(&context->names[idx], dentry, inode);
 }
 
 /**
@@ -1892,7 +2021,7 @@ void __audit_inode_child(const char *dname, const struct dentry *dentry,
 		if (!strcmp(dname, n->name) ||
 		    !audit_compare_dname_path(dname, n->name, &dirlen)) {
 			if (inode)
-				audit_copy_inode(n, inode);
+				audit_copy_inode(n, NULL, inode);
 			else
 				n->ino = (unsigned long)-1;
 			found_child = n->name;
@@ -1906,7 +2035,7 @@ add_names:
 			return;
 		idx = context->name_count - 1;
 		context->names[idx].name = NULL;
-		audit_copy_inode(&context->names[idx], parent);
+		audit_copy_inode(&context->names[idx], NULL, parent);
 	}
 
 	if (!found_child) {
@@ -1927,7 +2056,7 @@ add_names:
 	}
 
 	if (inode)
-		audit_copy_inode(&context->names[idx], inode);
+		audit_copy_inode(&context->names[idx], NULL, inode);
 	else
 		context->names[idx].ino = (unsigned long)-1;
 }
@@ -1942,15 +2071,18 @@ EXPORT_SYMBOL_GPL(__audit_inode_child);
  *
  * Also sets the context as auditable.
  */
-void auditsc_get_stamp(struct audit_context *ctx,
+int auditsc_get_stamp(struct audit_context *ctx,
 		       struct timespec *t, unsigned int *serial)
 {
+	if (!ctx->in_syscall)
+		return 0;
 	if (!ctx->serial)
 		ctx->serial = audit_serial();
 	t->tv_sec  = ctx->ctime.tv_sec;
 	t->tv_nsec = ctx->ctime.tv_nsec;
 	*serial    = ctx->serial;
 	ctx->auditable = 1;
+	return 1;
 }
 
 /* global counter which is incremented every time something logs in */
@@ -1978,7 +2110,7 @@ int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
 		audit_log_format(ab, "login pid=%d uid=%u "
 			"old auid=%u new auid=%u"
 			" old ses=%u new ses=%u",
-			task->pid, task->uid,
+			task->pid, task_uid(task),
 			task->loginuid, loginuid,
 			task->sessionid, sessionid);
 		audit_log_end(ab);
@@ -2361,7 +2493,7 @@ void __audit_ptrace(struct task_struct *t)
 
 	context->target_pid = t->pid;
 	context->target_auid = audit_get_loginuid(t);
-	context->target_uid = t->uid;
+	context->target_uid = task_uid(t);
 	context->target_sessionid = audit_get_sessionid(t);
 	security_task_getsecid(t, &context->target_sid);
 	memcpy(context->target_comm, t->comm, TASK_COMM_LEN);
@@ -2380,6 +2512,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 	struct audit_aux_data_pids *axp;
 	struct task_struct *tsk = current;
 	struct audit_context *ctx = tsk->audit_context;
+	uid_t uid = current_uid(), t_uid = task_uid(t);
 
 	if (audit_pid && t->tgid == audit_pid) {
 		if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1 || sig == SIGUSR2) {
@@ -2387,7 +2520,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 			if (tsk->loginuid != -1)
 				audit_sig_uid = tsk->loginuid;
 			else
-				audit_sig_uid = tsk->uid;
+				audit_sig_uid = uid;
 			security_task_getsecid(tsk, &audit_sig_sid);
 		}
 		if (!audit_signals || audit_dummy_context())
@@ -2399,7 +2532,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 	if (!ctx->target_pid) {
 		ctx->target_pid = t->tgid;
 		ctx->target_auid = audit_get_loginuid(t);
-		ctx->target_uid = t->uid;
+		ctx->target_uid = t_uid;
 		ctx->target_sessionid = audit_get_sessionid(t);
 		security_task_getsecid(t, &ctx->target_sid);
 		memcpy(ctx->target_comm, t->comm, TASK_COMM_LEN);
@@ -2420,7 +2553,7 @@ int __audit_signal_info(int sig, struct task_struct *t)
 
 	axp->target_pid[axp->pid_count] = t->tgid;
 	axp->target_auid[axp->pid_count] = audit_get_loginuid(t);
-	axp->target_uid[axp->pid_count] = t->uid;
+	axp->target_uid[axp->pid_count] = t_uid;
 	axp->target_sessionid[axp->pid_count] = audit_get_sessionid(t);
 	security_task_getsecid(t, &axp->target_sid[axp->pid_count]);
 	memcpy(axp->target_comm[axp->pid_count], t->comm, TASK_COMM_LEN);
@@ -2430,6 +2563,86 @@ int __audit_signal_info(int sig, struct task_struct *t)
 }
 
 /**
+ * __audit_log_bprm_fcaps - store information about a loading bprm and relevant fcaps
+ * @bprm: pointer to the bprm being processed
+ * @new: the proposed new credentials
+ * @old: the old credentials
+ *
+ * Simply check if the proc already has the caps given by the file and if not
+ * store the priv escalation info for later auditing at the end of the syscall
+ *
+ * -Eric
+ */
+int __audit_log_bprm_fcaps(struct linux_binprm *bprm,
+			   const struct cred *new, const struct cred *old)
+{
+	struct audit_aux_data_bprm_fcaps *ax;
+	struct audit_context *context = current->audit_context;
+	struct cpu_vfs_cap_data vcaps;
+	struct dentry *dentry;
+
+	ax = kmalloc(sizeof(*ax), GFP_KERNEL);
+	if (!ax)
+		return -ENOMEM;
+
+	ax->d.type = AUDIT_BPRM_FCAPS;
+	ax->d.next = context->aux;
+	context->aux = (void *)ax;
+
+	dentry = dget(bprm->file->f_dentry);
+	get_vfs_caps_from_disk(dentry, &vcaps);
+	dput(dentry);
+
+	ax->fcap.permitted = vcaps.permitted;
+	ax->fcap.inheritable = vcaps.inheritable;
+	ax->fcap.fE = !!(vcaps.magic_etc & VFS_CAP_FLAGS_EFFECTIVE);
+	ax->fcap_ver = (vcaps.magic_etc & VFS_CAP_REVISION_MASK) >> VFS_CAP_REVISION_SHIFT;
+
+	ax->old_pcap.permitted = old->cap_permitted;
+	ax->old_pcap.inheritable = old->cap_inheritable;
+	ax->old_pcap.effective = old->cap_effective;
+
+	ax->new_pcap.permitted = new->cap_permitted;
+	ax->new_pcap.inheritable = new->cap_inheritable;
+	ax->new_pcap.effective = new->cap_effective;
+	return 0;
+}
+
+/**
+ * __audit_log_capset - store information about the arguments to the capset syscall
+ * @pid: target pid of the capset call
+ * @new: the new credentials
+ * @old: the old (current) credentials
+ *
+ * Record the arguments userspace sent to sys_capset for later printing by the
+ * audit system if applicable
+ */
+int __audit_log_capset(pid_t pid,
+		       const struct cred *new, const struct cred *old)
+{
+	struct audit_aux_data_capset *ax;
+	struct audit_context *context = current->audit_context;
+
+	if (likely(!audit_enabled || !context || context->dummy))
+		return 0;
+
+	ax = kmalloc(sizeof(*ax), GFP_KERNEL);
+	if (!ax)
+		return -ENOMEM;
+
+	ax->d.type = AUDIT_CAPSET;
+	ax->d.next = context->aux;
+	context->aux = (void *)ax;
+
+	ax->pid = pid;
+	ax->cap.effective = new->cap_effective;
+	ax->cap.inheritable = new->cap_effective;
+	ax->cap.permitted = new->cap_permitted;
+
+	return 0;
+}
+
+/**
  * audit_core_dumps - record information about processes that end abnormally
  * @signr: signal value
  *
@@ -2440,7 +2653,8 @@ void audit_core_dumps(long signr)
 {
 	struct audit_buffer *ab;
 	u32 sid;
-	uid_t auid = audit_get_loginuid(current);
+	uid_t auid = audit_get_loginuid(current), uid;
+	gid_t gid;
 	unsigned int sessionid = audit_get_sessionid(current);
 
 	if (!audit_enabled)
@@ -2450,8 +2664,9 @@ void audit_core_dumps(long signr)
 		return;
 
 	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+	current_uid_gid(&uid, &gid);
 	audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
-			auid, current->uid, current->gid, sessionid);
+			auid, uid, gid, sessionid);
 	security_task_getsecid(current, &sid);
 	if (sid) {
 		char *ctx = NULL;
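
Both audit_copy_fcaps() and __audit_log_bprm_fcaps() reduce a file-capability xattr to the permitted and inheritable sets plus two values unpacked from the magic_etc word: the effective bit and the xattr revision. A standalone sketch of that unpacking; the constants mirror the VFS_CAP_* definitions in linux/capability.h, but treat the exact values here as illustrative:

#include <stdint.h>
#include <stdio.h>

#define VFS_CAP_REVISION_MASK	0xFF000000u	/* as in linux/capability.h */
#define VFS_CAP_REVISION_SHIFT	24
#define VFS_CAP_FLAGS_EFFECTIVE	0x000001u

int main(void)
{
	uint32_t magic_etc = 0x02000001;	/* a version-2 xattr with fE set */

	unsigned int fcap_ver = (magic_etc & VFS_CAP_REVISION_MASK)
				>> VFS_CAP_REVISION_SHIFT;
	int fE = !!(magic_etc & VFS_CAP_FLAGS_EFFECTIVE);

	/* matches the cap_fver=/cap_fe= fields emitted by audit_log_fcaps() */
	printf("cap_fver=%x cap_fe=%d\n", fcap_ver, fE);
	return 0;
}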
diff --git a/kernel/capability.c b/kernel/capability.c
index 33e51e78c2d8..36b4b4daebec 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -7,6 +7,7 @@
  * 30 May 2002:	Cleanup, Robert M. Love <rml@tech9.net>
  */
 
+#include <linux/audit.h>
 #include <linux/capability.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -14,12 +15,7 @@
 #include <linux/syscalls.h>
 #include <linux/pid_namespace.h>
 #include <asm/uaccess.h>
-
-/*
- * This lock protects task->cap_* for all tasks including current.
- * Locking rule: acquire this prior to tasklist_lock.
- */
-static DEFINE_SPINLOCK(task_capability_lock);
+#include "cred-internals.h"
 
 /*
  * Leveraged for setting/resetting capabilities
@@ -33,6 +29,17 @@ EXPORT_SYMBOL(__cap_empty_set);
 EXPORT_SYMBOL(__cap_full_set);
 EXPORT_SYMBOL(__cap_init_eff_set);
 
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+int file_caps_enabled = 1;
+
+static int __init file_caps_disable(char *str)
+{
+	file_caps_enabled = 0;
+	return 1;
+}
+__setup("no_file_caps", file_caps_disable);
+#endif
+
 /*
  * More recent versions of libcap are available from:
  *
@@ -115,167 +122,12 @@ static int cap_validate_magic(cap_user_header_t header, unsigned *tocopy)
 	return 0;
 }
 
-#ifndef CONFIG_SECURITY_FILE_CAPABILITIES
-
-/*
- * Without filesystem capability support, we nominally support one process
- * setting the capabilities of another
- */
-static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
-				     kernel_cap_t *pIp, kernel_cap_t *pPp)
-{
-	struct task_struct *target;
-	int ret;
-
-	spin_lock(&task_capability_lock);
-	read_lock(&tasklist_lock);
-
-	if (pid && pid != task_pid_vnr(current)) {
-		target = find_task_by_vpid(pid);
-		if (!target) {
-			ret = -ESRCH;
-			goto out;
-		}
-	} else
-		target = current;
-
-	ret = security_capget(target, pEp, pIp, pPp);
-
-out:
-	read_unlock(&tasklist_lock);
-	spin_unlock(&task_capability_lock);
-
-	return ret;
-}
-
-/*
- * cap_set_pg - set capabilities for all processes in a given process
- * group.  We call this holding task_capability_lock and tasklist_lock.
- */
-static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
-			     kernel_cap_t *inheritable,
-			     kernel_cap_t *permitted)
-{
-	struct task_struct *g, *target;
-	int ret = -EPERM;
-	int found = 0;
-	struct pid *pgrp;
-
-	spin_lock(&task_capability_lock);
-	read_lock(&tasklist_lock);
-
-	pgrp = find_vpid(pgrp_nr);
-	do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
-		target = g;
-		while_each_thread(g, target) {
-			if (!security_capset_check(target, effective,
-						   inheritable, permitted)) {
-				security_capset_set(target, effective,
-						    inheritable, permitted);
-				ret = 0;
-			}
-			found = 1;
-		}
-	} while_each_pid_task(pgrp, PIDTYPE_PGID, g);
-
-	read_unlock(&tasklist_lock);
-	spin_unlock(&task_capability_lock);
-
-	if (!found)
-		ret = 0;
-	return ret;
-}
-
-/*
- * cap_set_all - set capabilities for all processes other than init
- * and self.  We call this holding task_capability_lock and tasklist_lock.
- */
-static inline int cap_set_all(kernel_cap_t *effective,
-			      kernel_cap_t *inheritable,
-			      kernel_cap_t *permitted)
-{
-	struct task_struct *g, *target;
-	int ret = -EPERM;
-	int found = 0;
-
-	spin_lock(&task_capability_lock);
-	read_lock(&tasklist_lock);
-
-	do_each_thread(g, target) {
-		if (target == current
-		    || is_container_init(target->group_leader))
-			continue;
-		found = 1;
-		if (security_capset_check(target, effective, inheritable,
-					  permitted))
-			continue;
-		ret = 0;
-		security_capset_set(target, effective, inheritable, permitted);
-	} while_each_thread(g, target);
-
-	read_unlock(&tasklist_lock);
-	spin_unlock(&task_capability_lock);
-
-	if (!found)
-		ret = 0;
-
-	return ret;
-}
-
-/*
- * Given the target pid does not refer to the current process we
- * need more elaborate support...  (This support is not present when
- * filesystem capabilities are configured.)
- */
-static inline int do_sys_capset_other_tasks(pid_t pid, kernel_cap_t *effective,
-					    kernel_cap_t *inheritable,
-					    kernel_cap_t *permitted)
-{
-	struct task_struct *target;
-	int ret;
-
-	if (!capable(CAP_SETPCAP))
-		return -EPERM;
-
-	if (pid == -1)	  /* all procs other than current and init */
-		return cap_set_all(effective, inheritable, permitted);
-
-	else if (pid < 0) /* all procs in process group */
-		return cap_set_pg(-pid, effective, inheritable, permitted);
-
-	/* target != current */
-	spin_lock(&task_capability_lock);
-	read_lock(&tasklist_lock);
-
-	target = find_task_by_vpid(pid);
-	if (!target)
-		ret = -ESRCH;
-	else {
-		ret = security_capset_check(target, effective, inheritable,
-					    permitted);
-
-		/* having verified that the proposed changes are legal,
-		   we now put them into effect. */
-		if (!ret)
-			security_capset_set(target, effective, inheritable,
-					    permitted);
-	}
-
-	read_unlock(&tasklist_lock);
-	spin_unlock(&task_capability_lock);
-
-	return ret;
-}
-
-#else /* ie., def CONFIG_SECURITY_FILE_CAPABILITIES */
-
 /*
- * If we have configured with filesystem capability support, then the
- * only thing that can change the capabilities of the current process
- * is the current process. As such, we can't be in this code at the
- * same time as we are in the process of setting capabilities in this
- * process. The net result is that we can limit our use of locks to
- * when we are reading the caps of another process.
+ * The only thing that can change the capabilities of the current
+ * process is the current process. As such, we can't be in this code
+ * at the same time as we are in the process of setting capabilities
+ * in this process. The net result is that we can limit our use of
+ * locks to when we are reading the caps of another process.
  */
 static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
 				     kernel_cap_t *pIp, kernel_cap_t *pPp)
@@ -285,7 +137,6 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
 	if (pid && (pid != task_pid_vnr(current))) {
 		struct task_struct *target;
 
-		spin_lock(&task_capability_lock);
 		read_lock(&tasklist_lock);
 
 		target = find_task_by_vpid(pid);
@@ -295,50 +146,12 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
 		ret = security_capget(target, pEp, pIp, pPp);
 
 		read_unlock(&tasklist_lock);
-		spin_unlock(&task_capability_lock);
 	} else
 		ret = security_capget(current, pEp, pIp, pPp);
 
 	return ret;
 }
 
-/*
- * With filesystem capability support configured, the kernel does not
- * permit the changing of capabilities in one process by another
- * process. (CAP_SETPCAP has much less broad semantics when configured
- * this way.)
- */
-static inline int do_sys_capset_other_tasks(pid_t pid,
-					    kernel_cap_t *effective,
-					    kernel_cap_t *inheritable,
-					    kernel_cap_t *permitted)
-{
-	return -EPERM;
-}
-
-#endif /* ie., ndef CONFIG_SECURITY_FILE_CAPABILITIES */
-
-/*
- * Atomically modify the effective capabilities returning the original
- * value. No permission check is performed here - it is assumed that the
- * caller is permitted to set the desired effective capabilities.
- */
-kernel_cap_t cap_set_effective(const kernel_cap_t pE_new)
-{
-	kernel_cap_t pE_old;
-
-	spin_lock(&task_capability_lock);
-
-	pE_old = current->cap_effective;
-	current->cap_effective = pE_new;
-
-	spin_unlock(&task_capability_lock);
-
-	return pE_old;
-}
-
-EXPORT_SYMBOL(cap_set_effective);
-
 /**
  * sys_capget - get the capabilities of a given process.
  * @header: pointer to struct that contains capability version and
@@ -366,7 +179,6 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 		return -EINVAL;
 
 	ret = cap_get_target_pid(pid, &pE, &pI, &pP);
-
 	if (!ret) {
 		struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 		unsigned i;
@@ -412,16 +224,14 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
  * @data: pointer to struct that contains the effective, permitted,
  *	and inheritable capabilities
  *
- * Set capabilities for a given process, all processes, or all
- * processes in a given process group.
+ * Set capabilities for the current process only.  The ability to any other
+ * process(es) has been deprecated and removed.
  *
  * The restrictions on setting capabilities are specified as:
  *
- * [pid is for the 'target' task.  'current' is the calling task.]
- *
- * I: any raised capabilities must be a subset of the (old current) permitted
- * P: any raised capabilities must be a subset of the (old current) permitted
- * E: must be set to a subset of (new target) permitted
+ * I: any raised capabilities must be a subset of the old permitted
+ * P: any raised capabilities must be a subset of the old permitted
+ * E: must be set to a subset of new permitted
  *
  * Returns 0 on success and < 0 on error.
  */
@@ -430,6 +240,7 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 	struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 	unsigned i, tocopy;
 	kernel_cap_t inheritable, permitted, effective;
+	struct cred *new;
 	int ret;
 	pid_t pid;
 
@@ -440,10 +251,13 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 	if (get_user(pid, &header->pid))
 		return -EFAULT;
 
-	if (copy_from_user(&kdata, data, tocopy
-			   * sizeof(struct __user_cap_data_struct))) {
+	/* may only affect current now */
+	if (pid != 0 && pid != task_pid_vnr(current))
+		return -EPERM;
+
+	if (copy_from_user(&kdata, data,
+			   tocopy * sizeof(struct __user_cap_data_struct)))
 		return -EFAULT;
-	}
 
 	for (i = 0; i < tocopy; i++) {
 		effective.cap[i] = kdata[i].effective;
@@ -457,32 +271,23 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 		i++;
 	}
 
-	if (pid && (pid != task_pid_vnr(current)))
-		ret = do_sys_capset_other_tasks(pid, &effective, &inheritable,
-						&permitted);
-	else {
-		/*
-		 * This lock is required even when filesystem
-		 * capability support is configured - it protects the
-		 * sys_capget() call from returning incorrect data in
-		 * the case that the targeted process is not the
-		 * current one.
-		 */
-		spin_lock(&task_capability_lock);
+	new = prepare_creds();
+	if (!new)
+		return -ENOMEM;
 
-		ret = security_capset_check(current, &effective, &inheritable,
-					    &permitted);
-		/*
-		 * Having verified that the proposed changes are
-		 * legal, we now put them into effect.
-		 */
-		if (!ret)
-			security_capset_set(current, &effective, &inheritable,
-					    &permitted);
-		spin_unlock(&task_capability_lock);
-	}
+	ret = security_capset(new, current_cred(),
+			      &effective, &inheritable, &permitted);
+	if (ret < 0)
+		goto error;
+
+	ret = audit_log_capset(pid, new, current_cred());
+	if (ret < 0)
+		return ret;
 
+	return commit_creds(new);
 
+error:
+	abort_creds(new);
 	return ret;
 }
 
@@ -498,6 +303,11 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
  */
 int capable(int cap)
 {
+	if (unlikely(!cap_valid(cap))) {
+		printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
+		BUG();
+	}
+
 	if (has_capability(current, cap)) {
 		current->flags |= PF_SUPERPRIV;
 		return 1;
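
The capability.c rewrite is the clearest statement of the new credential model: sys_capset() no longer mutates current->cap_* under task_capability_lock, it builds a complete replacement credential set and publishes it atomically. The copy-then-publish skeleton, sketched against the calls visible in this diff (not a drop-in function):

static long update_my_caps(const kernel_cap_t *effective,
			   const kernel_cap_t *inheritable,
			   const kernel_cap_t *permitted)
{
	struct cred *new;
	int ret;

	new = prepare_creds();		/* private copy of current's creds */
	if (!new)
		return -ENOMEM;

	ret = security_capset(new, current_cred(),
			      effective, inheritable, permitted);
	if (ret < 0)
		goto error;

	return commit_creds(new);	/* atomic install; old creds put */

error:
	abort_creds(new);		/* copy was never published; free it */
	return ret;
}

Since the only writer to a task's credentials is now the task itself, readers never observe a half-updated set, which is what lets task_capability_lock and cap_set_effective() go away.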
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index fe00b3b983a8..48348dde6d81 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -571,8 +571,8 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
 
 	if (inode) {
 		inode->i_mode = mode;
-		inode->i_uid = current->fsuid;
-		inode->i_gid = current->fsgid;
+		inode->i_uid = current_fsuid();
+		inode->i_gid = current_fsgid();
 		inode->i_blocks = 0;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 		inode->i_mapping->backing_dev_info = &cgroup_backing_dev_info;
@@ -702,7 +702,7 @@ static int rebind_subsystems(struct cgroupfs_root *root,
 	 * any child cgroups exist. This is theoretically supportable
 	 * but involves complex error handling, so it's being left until
 	 * later */
-	if (!list_empty(&cgrp->children))
+	if (root->number_of_cgroups > 1)
 		return -EBUSY;
 
 	/* Process each subsystem */
@@ -1024,7 +1024,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 		if (ret == -EBUSY) {
 			mutex_unlock(&cgroup_mutex);
 			mutex_unlock(&inode->i_mutex);
-			goto drop_new_super;
+			goto free_cg_links;
 		}
 
 		/* EBUSY should be the only error here */
@@ -1073,10 +1073,11 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 
 	return simple_set_mnt(mnt, sb);
 
+ free_cg_links:
+	free_cg_links(&tmp_cg_links);
  drop_new_super:
 	up_write(&sb->s_umount);
 	deactivate_super(sb);
-	free_cg_links(&tmp_cg_links);
 	return ret;
 }
 
@@ -1279,6 +1280,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
 {
 	struct task_struct *tsk;
+	const struct cred *cred = current_cred(), *tcred;
 	int ret;
 
 	if (pid) {
@@ -1288,14 +1290,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
 			rcu_read_unlock();
 			return -ESRCH;
 		}
-		get_task_struct(tsk);
-		rcu_read_unlock();
 
-		if ((current->euid) && (current->euid != tsk->uid)
-		    && (current->euid != tsk->suid)) {
-			put_task_struct(tsk);
+		tcred = __task_cred(tsk);
+		if (cred->euid &&
+		    cred->euid != tcred->uid &&
+		    cred->euid != tcred->suid) {
+			rcu_read_unlock();
 			return -EACCES;
 		}
+		get_task_struct(tsk);
+		rcu_read_unlock();
 	} else {
 		tsk = current;
 		get_task_struct(tsk);
@@ -2934,9 +2938,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 again:
 	root = subsys->root;
 	if (root == &rootnode) {
-		printk(KERN_INFO
-		       "Not cloning cgroup for unused subsystem %s\n",
-		       subsys->name);
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
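
The attach_task_by_pid() rework shows the reader-side rule that replaces direct tsk->uid access: another task's credentials are reached via __task_cred() and are only stable inside the enclosing rcu_read_lock() section. Just the permission check, as a sketch (helper name hypothetical, calls as in the diff):

static int euid_may_touch(struct task_struct *tsk)
{
	const struct cred *cred = current_cred(), *tcred;
	int allowed;

	rcu_read_lock();
	tcred = __task_cred(tsk);	/* must not be cached past unlock */
	allowed = !cred->euid ||	/* euid 0 short-circuits */
		  cred->euid == tcred->uid ||
		  cred->euid == tcred->suid;
	rcu_read_unlock();

	return allowed ? 0 : -EACCES;
}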
diff --git a/kernel/cred-internals.h b/kernel/cred-internals.h
new file mode 100644
index 000000000000..2dc4fc2d0bf1
--- /dev/null
+++ b/kernel/cred-internals.h
@@ -0,0 +1,21 @@
+/* Internal credentials stuff
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+/*
+ * user.c
+ */
+static inline void sched_switch_user(struct task_struct *p)
+{
+#ifdef CONFIG_USER_SCHED
+	sched_move_task(p);
+#endif	/* CONFIG_USER_SCHED */
+}
+
diff --git a/kernel/cred.c b/kernel/cred.c
new file mode 100644
index 000000000000..ff7bc071991c
--- /dev/null
+++ b/kernel/cred.c
@@ -0,0 +1,588 @@
+/* Task credentials management - see Documentation/credentials.txt
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/cred.h>
+#include <linux/sched.h>
+#include <linux/key.h>
+#include <linux/keyctl.h>
+#include <linux/init_task.h>
+#include <linux/security.h>
+#include <linux/cn_proc.h>
+#include "cred-internals.h"
+
+static struct kmem_cache *cred_jar;
+
+/*
+ * The common credentials for the initial task's thread group
+ */
+#ifdef CONFIG_KEYS
+static struct thread_group_cred init_tgcred = {
+	.usage	= ATOMIC_INIT(2),
+	.tgid	= 0,
+	.lock	= SPIN_LOCK_UNLOCKED,
+};
+#endif
+
+/*
+ * The initial credentials for the initial task
+ */
+struct cred init_cred = {
+	.usage			= ATOMIC_INIT(4),
+	.securebits		= SECUREBITS_DEFAULT,
+	.cap_inheritable	= CAP_INIT_INH_SET,
+	.cap_permitted		= CAP_FULL_SET,
+	.cap_effective		= CAP_INIT_EFF_SET,
+	.cap_bset		= CAP_INIT_BSET,
+	.user			= INIT_USER,
+	.group_info		= &init_groups,
+#ifdef CONFIG_KEYS
+	.tgcred			= &init_tgcred,
+#endif
+};
+
+/*
+ * Dispose of the shared task group credentials
+ */
+#ifdef CONFIG_KEYS
+static void release_tgcred_rcu(struct rcu_head *rcu)
+{
+	struct thread_group_cred *tgcred =
+		container_of(rcu, struct thread_group_cred, rcu);
+
+	BUG_ON(atomic_read(&tgcred->usage) != 0);
+
+	key_put(tgcred->session_keyring);
+	key_put(tgcred->process_keyring);
+	kfree(tgcred);
+}
+#endif
+
+/*
+ * Release a set of thread group credentials.
+ */
+static void release_tgcred(struct cred *cred)
+{
+#ifdef CONFIG_KEYS
+	struct thread_group_cred *tgcred = cred->tgcred;
+
+	if (atomic_dec_and_test(&tgcred->usage))
+		call_rcu(&tgcred->rcu, release_tgcred_rcu);
+#endif
+}
+
+/*
+ * The RCU callback to actually dispose of a set of credentials
+ */
+static void put_cred_rcu(struct rcu_head *rcu)
+{
+	struct cred *cred = container_of(rcu, struct cred, rcu);
+
+	if (atomic_read(&cred->usage) != 0)
+		panic("CRED: put_cred_rcu() sees %p with usage %d\n",
+		      cred, atomic_read(&cred->usage));
+
+	security_cred_free(cred);
+	key_put(cred->thread_keyring);
+	key_put(cred->request_key_auth);
+	release_tgcred(cred);
+	put_group_info(cred->group_info);
+	free_uid(cred->user);
+	kmem_cache_free(cred_jar, cred);
+}
+
+/**
+ * __put_cred - Destroy a set of credentials
+ * @cred: The record to release
+ *
+ * Destroy a set of credentials on which no references remain.
+ */
+void __put_cred(struct cred *cred)
+{
+	BUG_ON(atomic_read(&cred->usage) != 0);
+
+	call_rcu(&cred->rcu, put_cred_rcu);
+}
+EXPORT_SYMBOL(__put_cred);
114 | |||
115 | /** | ||
116 | * prepare_creds - Prepare a new set of credentials for modification | ||
117 | * | ||
118 | * Prepare a new set of task credentials for modification. A task's creds | ||
119 | * shouldn't generally be modified directly, therefore this function is used to | ||
120 | * prepare a new copy, which the caller then modifies and then commits by | ||
121 | * calling commit_creds(). | ||
122 | * | ||
123 | * Preparation involves making a copy of the objective creds for modification. | ||
124 | * | ||
125 | * Returns a pointer to the new creds-to-be if successful, NULL otherwise. | ||
126 | * | ||
127 | * Call commit_creds() or abort_creds() to clean up. | ||
128 | */ | ||
129 | struct cred *prepare_creds(void) | ||
130 | { | ||
131 | struct task_struct *task = current; | ||
132 | const struct cred *old; | ||
133 | struct cred *new; | ||
134 | |||
135 | BUG_ON(atomic_read(&task->real_cred->usage) < 1); | ||
136 | |||
137 | new = kmem_cache_alloc(cred_jar, GFP_KERNEL); | ||
138 | if (!new) | ||
139 | return NULL; | ||
140 | |||
141 | old = task->cred; | ||
142 | memcpy(new, old, sizeof(struct cred)); | ||
143 | |||
144 | atomic_set(&new->usage, 1); | ||
145 | get_group_info(new->group_info); | ||
146 | get_uid(new->user); | ||
147 | |||
148 | #ifdef CONFIG_KEYS | ||
149 | key_get(new->thread_keyring); | ||
150 | key_get(new->request_key_auth); | ||
151 | atomic_inc(&new->tgcred->usage); | ||
152 | #endif | ||
153 | |||
154 | #ifdef CONFIG_SECURITY | ||
155 | new->security = NULL; | ||
156 | #endif | ||
157 | |||
158 | if (security_prepare_creds(new, old, GFP_KERNEL) < 0) | ||
159 | goto error; | ||
160 | return new; | ||
161 | |||
162 | error: | ||
163 | abort_creds(new); | ||
164 | return NULL; | ||
165 | } | ||
166 | EXPORT_SYMBOL(prepare_creds); | ||
167 | |||
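
The kerneldoc above implies a fixed calling sequence for prepare_creds() and
commit_creds(); a sketch of how a set*id-style path would drive it
(set_some_fsuid() is illustrative, not part of this patch):

static int set_some_fsuid(uid_t uid)
{
	struct cred *new;

	new = prepare_creds();	/* private copy of current's creds, usage == 1 */
	if (!new)
		return -ENOMEM;

	new->fsuid = uid;	/* modify only the copy */

	/* commit_creds() consumes the reference and always returns 0, so it
	 * can be tail-called; on an error path call abort_creds(new) instead. */
	return commit_creds(new);
}
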
168 | /* | ||
169 | * Prepare credentials for current to perform an execve() | ||
170 | * - The caller must hold current->cred_exec_mutex | ||
171 | */ | ||
172 | struct cred *prepare_exec_creds(void) | ||
173 | { | ||
174 | struct thread_group_cred *tgcred = NULL; | ||
175 | struct cred *new; | ||
176 | |||
177 | #ifdef CONFIG_KEYS | ||
178 | tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); | ||
179 | if (!tgcred) | ||
180 | return NULL; | ||
181 | #endif | ||
182 | |||
183 | new = prepare_creds(); | ||
184 | if (!new) { | ||
185 | kfree(tgcred); | ||
186 | return new; | ||
187 | } | ||
188 | |||
189 | #ifdef CONFIG_KEYS | ||
190 | /* newly exec'd tasks don't get a thread keyring */ | ||
191 | key_put(new->thread_keyring); | ||
192 | new->thread_keyring = NULL; | ||
193 | |||
194 | /* create a new per-thread-group cred for this set of threads to | ||
195 | * share */ | ||
196 | memcpy(tgcred, new->tgcred, sizeof(struct thread_group_cred)); | ||
197 | |||
198 | atomic_set(&tgcred->usage, 1); | ||
199 | spin_lock_init(&tgcred->lock); | ||
200 | |||
201 | /* inherit the session keyring; new process keyring */ | ||
202 | key_get(tgcred->session_keyring); | ||
203 | tgcred->process_keyring = NULL; | ||
204 | |||
205 | release_tgcred(new); | ||
206 | new->tgcred = tgcred; | ||
207 | #endif | ||
208 | |||
209 | return new; | ||
210 | } | ||
211 | |||
212 | /* | ||
213 | * prepare new credentials for the usermode helper dispatcher | ||
214 | */ | ||
215 | struct cred *prepare_usermodehelper_creds(void) | ||
216 | { | ||
217 | #ifdef CONFIG_KEYS | ||
218 | struct thread_group_cred *tgcred = NULL; | ||
219 | #endif | ||
220 | struct cred *new; | ||
221 | |||
222 | #ifdef CONFIG_KEYS | ||
223 | tgcred = kzalloc(sizeof(*new->tgcred), GFP_ATOMIC); | ||
224 | if (!tgcred) | ||
225 | return NULL; | ||
226 | #endif | ||
227 | |||
228 | new = kmem_cache_alloc(cred_jar, GFP_ATOMIC); | ||
229 | if (!new) | ||
230 | return NULL; | ||
231 | |||
232 | memcpy(new, &init_cred, sizeof(struct cred)); | ||
233 | |||
234 | atomic_set(&new->usage, 1); | ||
235 | get_group_info(new->group_info); | ||
236 | get_uid(new->user); | ||
237 | |||
238 | #ifdef CONFIG_KEYS | ||
239 | new->thread_keyring = NULL; | ||
240 | new->request_key_auth = NULL; | ||
241 | new->jit_keyring = KEY_REQKEY_DEFL_DEFAULT; | ||
242 | |||
243 | atomic_set(&tgcred->usage, 1); | ||
244 | spin_lock_init(&tgcred->lock); | ||
245 | new->tgcred = tgcred; | ||
246 | #endif | ||
247 | |||
248 | #ifdef CONFIG_SECURITY | ||
249 | new->security = NULL; | ||
250 | #endif | ||
251 | if (security_prepare_creds(new, &init_cred, GFP_ATOMIC) < 0) | ||
252 | goto error; | ||
253 | |||
254 | BUG_ON(atomic_read(&new->usage) != 1); | ||
255 | return new; | ||
256 | |||
257 | error: | ||
258 | put_cred(new); | ||
259 | return NULL; | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * Copy credentials for the new process created by fork() | ||
264 | * | ||
265 | * We share if we can, but under some circumstances we have to generate a new | ||
266 | * set. | ||
267 | * | ||
268 | * The new process gets the current process's subjective credentials as its | ||
269 | * objective and subjective credentials | ||
270 | */ | ||
271 | int copy_creds(struct task_struct *p, unsigned long clone_flags) | ||
272 | { | ||
273 | #ifdef CONFIG_KEYS | ||
274 | struct thread_group_cred *tgcred; | ||
275 | #endif | ||
276 | struct cred *new; | ||
277 | int ret; | ||
278 | |||
279 | mutex_init(&p->cred_exec_mutex); | ||
280 | |||
281 | if ( | ||
282 | #ifdef CONFIG_KEYS | ||
283 | !p->cred->thread_keyring && | ||
284 | #endif | ||
285 | clone_flags & CLONE_THREAD | ||
286 | ) { | ||
287 | p->real_cred = get_cred(p->cred); | ||
288 | get_cred(p->cred); | ||
289 | atomic_inc(&p->cred->user->processes); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | new = prepare_creds(); | ||
294 | if (!new) | ||
295 | return -ENOMEM; | ||
296 | |||
297 | if (clone_flags & CLONE_NEWUSER) { | ||
298 | ret = create_user_ns(new); | ||
299 | if (ret < 0) | ||
300 | goto error_put; | ||
301 | } | ||
302 | |||
303 | #ifdef CONFIG_KEYS | ||
304 | /* new threads get their own thread keyrings if their parent already | ||
305 | * had one */ | ||
306 | if (new->thread_keyring) { | ||
307 | key_put(new->thread_keyring); | ||
308 | new->thread_keyring = NULL; | ||
309 | if (clone_flags & CLONE_THREAD) | ||
310 | install_thread_keyring_to_cred(new); | ||
311 | } | ||
312 | |||
313 | /* we share the process and session keyrings between all the threads in | ||
314 | * a process - this is slightly icky as we violate COW credentials a | ||
315 | * bit */ | ||
316 | if (!(clone_flags & CLONE_THREAD)) { | ||
317 | tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL); | ||
318 | if (!tgcred) { | ||
319 | ret = -ENOMEM; | ||
320 | goto error_put; | ||
321 | } | ||
322 | atomic_set(&tgcred->usage, 1); | ||
323 | spin_lock_init(&tgcred->lock); | ||
324 | tgcred->process_keyring = NULL; | ||
325 | tgcred->session_keyring = key_get(new->tgcred->session_keyring); | ||
326 | |||
327 | release_tgcred(new); | ||
328 | new->tgcred = tgcred; | ||
329 | } | ||
330 | #endif | ||
331 | |||
332 | atomic_inc(&new->user->processes); | ||
333 | p->cred = p->real_cred = get_cred(new); | ||
334 | return 0; | ||
335 | |||
336 | error_put: | ||
337 | put_cred(new); | ||
338 | return ret; | ||
339 | } | ||
340 | |||
341 | /** | ||
342 | * commit_creds - Install new credentials upon the current task | ||
343 | * @new: The credentials to be assigned | ||
344 | * | ||
345 | * Install a new set of credentials to the current task, using RCU to replace | ||
346 | * the old set. Both the objective and the subjective credentials pointers are | ||
347 | * updated. This function may not be called if the subjective credentials are | ||
348 | * in an overridden state. | ||
349 | * | ||
350 | * This function eats the caller's reference to the new credentials. | ||
351 | * | ||
352 | * Always returns 0 thus allowing this function to be tail-called at the end | ||
353 | * of, say, sys_setgid(). | ||
354 | */ | ||
355 | int commit_creds(struct cred *new) | ||
356 | { | ||
357 | struct task_struct *task = current; | ||
358 | const struct cred *old; | ||
359 | |||
360 | BUG_ON(task->cred != task->real_cred); | ||
361 | BUG_ON(atomic_read(&task->real_cred->usage) < 2); | ||
362 | BUG_ON(atomic_read(&new->usage) < 1); | ||
363 | |||
364 | old = task->real_cred; | ||
365 | security_commit_creds(new, old); | ||
366 | |||
367 | get_cred(new); /* we will require a ref for the subj creds too */ | ||
368 | |||
369 | /* dumpability changes */ | ||
370 | if (old->euid != new->euid || | ||
371 | old->egid != new->egid || | ||
372 | old->fsuid != new->fsuid || | ||
373 | old->fsgid != new->fsgid || | ||
374 | !cap_issubset(new->cap_permitted, old->cap_permitted)) { | ||
375 | set_dumpable(task->mm, suid_dumpable); | ||
376 | task->pdeath_signal = 0; | ||
377 | smp_wmb(); | ||
378 | } | ||
379 | |||
380 | /* alter the thread keyring */ | ||
381 | if (new->fsuid != old->fsuid) | ||
382 | key_fsuid_changed(task); | ||
383 | if (new->fsgid != old->fsgid) | ||
384 | key_fsgid_changed(task); | ||
385 | |||
386 | /* do it | ||
387 | * - What if a process setreuid()'s and this brings the | ||
388 | * new uid over his NPROC rlimit? We can check this now | ||
389 | * cheaply with the new uid cache, so if it matters | ||
390 | * we should be checking for it. -DaveM | ||
391 | */ | ||
392 | if (new->user != old->user) | ||
393 | atomic_inc(&new->user->processes); | ||
394 | rcu_assign_pointer(task->real_cred, new); | ||
395 | rcu_assign_pointer(task->cred, new); | ||
396 | if (new->user != old->user) | ||
397 | atomic_dec(&old->user->processes); | ||
398 | |||
399 | sched_switch_user(task); | ||
400 | |||
401 | /* send notifications */ | ||
402 | if (new->uid != old->uid || | ||
403 | new->euid != old->euid || | ||
404 | new->suid != old->suid || | ||
405 | new->fsuid != old->fsuid) | ||
406 | proc_id_connector(task, PROC_EVENT_UID); | ||
407 | |||
408 | if (new->gid != old->gid || | ||
409 | new->egid != old->egid || | ||
410 | new->sgid != old->sgid || | ||
411 | new->fsgid != old->fsgid) | ||
412 | proc_id_connector(task, PROC_EVENT_GID); | ||
413 | |||
414 | /* release the old obj and subj refs both */ | ||
415 | put_cred(old); | ||
416 | put_cred(old); | ||
417 | return 0; | ||
418 | } | ||
419 | EXPORT_SYMBOL(commit_creds); | ||
420 | |||
421 | /** | ||
422 | * abort_creds - Discard a set of credentials and unlock the current task | ||
423 | * @new: The credentials that were going to be applied | ||
424 | * | ||
425 | * Discard a set of credentials that were under construction and unlock the | ||
426 | * current task. | ||
427 | */ | ||
428 | void abort_creds(struct cred *new) | ||
429 | { | ||
430 | BUG_ON(atomic_read(&new->usage) < 1); | ||
431 | put_cred(new); | ||
432 | } | ||
433 | EXPORT_SYMBOL(abort_creds); | ||
434 | |||
435 | /** | ||
436 | * override_creds - Override the current process's subjective credentials | ||
437 | * @new: The credentials to be assigned | ||
438 | * | ||
439 | * Install a set of temporary override subjective credentials on the current | ||
440 | * process, returning the old set for later reversion. | ||
441 | */ | ||
442 | const struct cred *override_creds(const struct cred *new) | ||
443 | { | ||
444 | const struct cred *old = current->cred; | ||
445 | |||
446 | rcu_assign_pointer(current->cred, get_cred(new)); | ||
447 | return old; | ||
448 | } | ||
449 | EXPORT_SYMBOL(override_creds); | ||
450 | |||
451 | /** | ||
452 | * revert_creds - Revert a temporary subjective credentials override | ||
453 | * @old: The credentials to be restored | ||
454 | * | ||
455 | * Revert a temporary set of override subjective credentials to an old set, | ||
456 | * discarding the override set. | ||
457 | */ | ||
458 | void revert_creds(const struct cred *old) | ||
459 | { | ||
460 | const struct cred *override = current->cred; | ||
461 | |||
462 | rcu_assign_pointer(current->cred, old); | ||
463 | put_cred(override); | ||
464 | } | ||
465 | EXPORT_SYMBOL(revert_creds); | ||
466 | |||
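
override_creds() and revert_creds() change only the subjective credentials
and are meant to bracket a single operation; a sketch of the expected pairing
(the helper names are hypothetical, only the cred calls come from this patch):

extern int perform_checked_operation(void);	/* hypothetical: tests current_cred() */

static int do_delegated_work(const struct cred *service_cred)
{
	const struct cred *old;
	int ret;

	old = override_creds(service_cred);	/* takes its own ref on service_cred */
	ret = perform_checked_operation();
	revert_creds(old);			/* drops the override's reference */
	return ret;
}
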
467 | /* | ||
468 | * initialise the credentials stuff | ||
469 | */ | ||
470 | void __init cred_init(void) | ||
471 | { | ||
472 | /* allocate a slab in which we can store credentials */ | ||
473 | cred_jar = kmem_cache_create("cred_jar", sizeof(struct cred), | ||
474 | 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | ||
475 | } | ||
476 | |||
477 | /** | ||
478 | * prepare_kernel_cred - Prepare a set of credentials for a kernel service | ||
479 | * @daemon: A userspace daemon to be used as a reference | ||
480 | * | ||
481 | * Prepare a set of credentials for a kernel service. This can then be used to | ||
482 | * override a task's own credentials so that work can be done on behalf of that | ||
483 | * task that requires a different subjective context. | ||
484 | * | ||
485 | * @daemon is used to provide a base for the security record, but can be NULL. | ||
486 | * If @daemon is supplied, then the security data will be derived from that; | ||
487 | * otherwise the ids will be set to 0, with no groups, full capabilities and no keys. | ||
488 | * | ||
489 | * The caller may change these controls afterwards if desired. | ||
490 | * | ||
491 | * Returns the new credentials or NULL if out of memory. | ||
492 | * | ||
493 | * Does not take, and does not return holding current->cred_replace_mutex. | ||
494 | */ | ||
495 | struct cred *prepare_kernel_cred(struct task_struct *daemon) | ||
496 | { | ||
497 | const struct cred *old; | ||
498 | struct cred *new; | ||
499 | |||
500 | new = kmem_cache_alloc(cred_jar, GFP_KERNEL); | ||
501 | if (!new) | ||
502 | return NULL; | ||
503 | |||
504 | if (daemon) | ||
505 | old = get_task_cred(daemon); | ||
506 | else | ||
507 | old = get_cred(&init_cred); | ||
508 | |||
509 | get_uid(new->user); | ||
510 | get_group_info(new->group_info); | ||
511 | |||
512 | #ifdef CONFIG_KEYS | ||
513 | atomic_inc(&init_tgcred.usage); | ||
514 | new->tgcred = &init_tgcred; | ||
515 | new->request_key_auth = NULL; | ||
516 | new->thread_keyring = NULL; | ||
517 | new->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; | ||
518 | #endif | ||
519 | |||
520 | #ifdef CONFIG_SECURITY | ||
521 | new->security = NULL; | ||
522 | #endif | ||
523 | if (security_prepare_creds(new, old, GFP_KERNEL) < 0) | ||
524 | goto error; | ||
525 | |||
526 | atomic_set(&new->usage, 1); | ||
527 | put_cred(old); | ||
528 | return new; | ||
529 | |||
530 | error: | ||
531 | put_cred(new); | ||
532 | return NULL; | ||
533 | } | ||
534 | EXPORT_SYMBOL(prepare_kernel_cred); | ||
535 | |||
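
A sketch of how a kernel service might combine prepare_kernel_cred() with the
security-override helpers that follow; the function name and the SELinux
context string are made up for illustration:

static const struct cred *make_service_creds(void)
{
	struct cred *new;

	new = prepare_kernel_cred(NULL);	/* NULL: base everything on init_cred */
	if (!new)
		return NULL;

	/* optionally pin the LSM context the service should act under */
	if (set_security_override_from_ctx(new, "system_u:system_r:kernel_t:s0") < 0) {
		put_cred(new);
		return NULL;
	}
	return new;	/* later bracketed by override_creds()/revert_creds() */
}
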
536 | /** | ||
537 | * set_security_override - Set the security ID in a set of credentials | ||
538 | * @new: The credentials to alter | ||
539 | * @secid: The LSM security ID to set | ||
540 | * | ||
541 | * Set the LSM security ID in a set of credentials so that the subjective | ||
542 | * security is overridden when an alternative set of credentials is used. | ||
543 | */ | ||
544 | int set_security_override(struct cred *new, u32 secid) | ||
545 | { | ||
546 | return security_kernel_act_as(new, secid); | ||
547 | } | ||
548 | EXPORT_SYMBOL(set_security_override); | ||
549 | |||
550 | /** | ||
551 | * set_security_override_from_ctx - Set the security ID in a set of credentials | ||
552 | * @new: The credentials to alter | ||
553 | * @secctx: The LSM security context to generate the security ID from. | ||
554 | * | ||
555 | * Set the LSM security ID in a set of credentials so that the subjective | ||
556 | * security is overridden when an alternative set of credentials is used. The | ||
557 | * security ID is specified in string form as a security context to be | ||
558 | * interpreted by the LSM. | ||
559 | */ | ||
560 | int set_security_override_from_ctx(struct cred *new, const char *secctx) | ||
561 | { | ||
562 | u32 secid; | ||
563 | int ret; | ||
564 | |||
565 | ret = security_secctx_to_secid(secctx, strlen(secctx), &secid); | ||
566 | if (ret < 0) | ||
567 | return ret; | ||
568 | |||
569 | return set_security_override(new, secid); | ||
570 | } | ||
571 | EXPORT_SYMBOL(set_security_override_from_ctx); | ||
572 | |||
573 | /** | ||
574 | * set_create_files_as - Set the LSM file create context in a set of credentials | ||
575 | * @new: The credentials to alter | ||
576 | * @inode: The inode to take the context from | ||
577 | * | ||
578 | * Change the LSM file creation context in a set of credentials to be the same | ||
579 | * as the object context of the specified inode, so that the new inodes have | ||
580 | * the same MAC context as that inode. | ||
581 | */ | ||
582 | int set_create_files_as(struct cred *new, struct inode *inode) | ||
583 | { | ||
584 | new->fsuid = inode->i_uid; | ||
585 | new->fsgid = inode->i_gid; | ||
586 | return security_kernel_create_files_as(new, inode); | ||
587 | } | ||
588 | EXPORT_SYMBOL(set_create_files_as); | ||
diff --git a/kernel/delayacct.c b/kernel/delayacct.c index b3179dad71be..abb6e17505e2 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c | |||
@@ -127,7 +127,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) | |||
127 | */ | 127 | */ |
128 | t1 = tsk->sched_info.pcount; | 128 | t1 = tsk->sched_info.pcount; |
129 | t2 = tsk->sched_info.run_delay; | 129 | t2 = tsk->sched_info.run_delay; |
130 | t3 = tsk->sched_info.cpu_time; | 130 | t3 = tsk->se.sum_exec_runtime; |
131 | 131 | ||
132 | d->cpu_count += t1; | 132 | d->cpu_count += t1; |
133 | 133 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 2d8be7ebb0f7..c7422ca92038 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -46,12 +46,18 @@ | |||
46 | #include <linux/blkdev.h> | 46 | #include <linux/blkdev.h> |
47 | #include <linux/task_io_accounting_ops.h> | 47 | #include <linux/task_io_accounting_ops.h> |
48 | #include <linux/tracehook.h> | 48 | #include <linux/tracehook.h> |
49 | #include <linux/init_task.h> | ||
49 | #include <trace/sched.h> | 50 | #include <trace/sched.h> |
50 | 51 | ||
51 | #include <asm/uaccess.h> | 52 | #include <asm/uaccess.h> |
52 | #include <asm/unistd.h> | 53 | #include <asm/unistd.h> |
53 | #include <asm/pgtable.h> | 54 | #include <asm/pgtable.h> |
54 | #include <asm/mmu_context.h> | 55 | #include <asm/mmu_context.h> |
56 | #include "cred-internals.h" | ||
57 | |||
58 | DEFINE_TRACE(sched_process_free); | ||
59 | DEFINE_TRACE(sched_process_exit); | ||
60 | DEFINE_TRACE(sched_process_wait); | ||
55 | 61 | ||
56 | static void exit_mm(struct task_struct * tsk); | 62 | static void exit_mm(struct task_struct * tsk); |
57 | 63 | ||
@@ -164,7 +170,10 @@ void release_task(struct task_struct * p) | |||
164 | int zap_leader; | 170 | int zap_leader; |
165 | repeat: | 171 | repeat: |
166 | tracehook_prepare_release_task(p); | 172 | tracehook_prepare_release_task(p); |
167 | atomic_dec(&p->user->processes); | 173 | /* don't need to get the RCU readlock here - the process is dead and |
174 | * can't be modifying its own credentials */ | ||
175 | atomic_dec(&__task_cred(p)->user->processes); | ||
176 | |||
168 | proc_flush_task(p); | 177 | proc_flush_task(p); |
169 | write_lock_irq(&tasklist_lock); | 178 | write_lock_irq(&tasklist_lock); |
170 | tracehook_finish_release_task(p); | 179 | tracehook_finish_release_task(p); |
@@ -339,12 +348,12 @@ static void reparent_to_kthreadd(void) | |||
339 | /* cpus_allowed? */ | 348 | /* cpus_allowed? */ |
340 | /* rt_priority? */ | 349 | /* rt_priority? */ |
341 | /* signals? */ | 350 | /* signals? */ |
342 | security_task_reparent_to_init(current); | ||
343 | memcpy(current->signal->rlim, init_task.signal->rlim, | 351 | memcpy(current->signal->rlim, init_task.signal->rlim, |
344 | sizeof(current->signal->rlim)); | 352 | sizeof(current->signal->rlim)); |
345 | atomic_inc(&(INIT_USER->__count)); | 353 | |
354 | atomic_inc(&init_cred.usage); | ||
355 | commit_creds(&init_cred); | ||
346 | write_unlock_irq(&tasklist_lock); | 356 | write_unlock_irq(&tasklist_lock); |
347 | switch_uid(INIT_USER); | ||
348 | } | 357 | } |
349 | 358 | ||
350 | void __set_special_pids(struct pid *pid) | 359 | void __set_special_pids(struct pid *pid) |
@@ -1078,7 +1087,6 @@ NORET_TYPE void do_exit(long code) | |||
1078 | check_stack_usage(); | 1087 | check_stack_usage(); |
1079 | exit_thread(); | 1088 | exit_thread(); |
1080 | cgroup_exit(tsk, 1); | 1089 | cgroup_exit(tsk, 1); |
1081 | exit_keys(tsk); | ||
1082 | 1090 | ||
1083 | if (group_dead && tsk->signal->leader) | 1091 | if (group_dead && tsk->signal->leader) |
1084 | disassociate_ctty(1); | 1092 | disassociate_ctty(1); |
@@ -1123,7 +1131,6 @@ NORET_TYPE void do_exit(long code) | |||
1123 | preempt_disable(); | 1131 | preempt_disable(); |
1124 | /* causes final put_task_struct in finish_task_switch(). */ | 1132 | /* causes final put_task_struct in finish_task_switch(). */ |
1125 | tsk->state = TASK_DEAD; | 1133 | tsk->state = TASK_DEAD; |
1126 | |||
1127 | schedule(); | 1134 | schedule(); |
1128 | BUG(); | 1135 | BUG(); |
1129 | /* Avoid "noreturn function does return". */ | 1136 | /* Avoid "noreturn function does return". */ |
@@ -1263,12 +1270,12 @@ static int wait_task_zombie(struct task_struct *p, int options, | |||
1263 | unsigned long state; | 1270 | unsigned long state; |
1264 | int retval, status, traced; | 1271 | int retval, status, traced; |
1265 | pid_t pid = task_pid_vnr(p); | 1272 | pid_t pid = task_pid_vnr(p); |
1273 | uid_t uid = __task_cred(p)->uid; | ||
1266 | 1274 | ||
1267 | if (!likely(options & WEXITED)) | 1275 | if (!likely(options & WEXITED)) |
1268 | return 0; | 1276 | return 0; |
1269 | 1277 | ||
1270 | if (unlikely(options & WNOWAIT)) { | 1278 | if (unlikely(options & WNOWAIT)) { |
1271 | uid_t uid = p->uid; | ||
1272 | int exit_code = p->exit_code; | 1279 | int exit_code = p->exit_code; |
1273 | int why, status; | 1280 | int why, status; |
1274 | 1281 | ||
@@ -1389,7 +1396,7 @@ static int wait_task_zombie(struct task_struct *p, int options, | |||
1389 | if (!retval && infop) | 1396 | if (!retval && infop) |
1390 | retval = put_user(pid, &infop->si_pid); | 1397 | retval = put_user(pid, &infop->si_pid); |
1391 | if (!retval && infop) | 1398 | if (!retval && infop) |
1392 | retval = put_user(p->uid, &infop->si_uid); | 1399 | retval = put_user(uid, &infop->si_uid); |
1393 | if (!retval) | 1400 | if (!retval) |
1394 | retval = pid; | 1401 | retval = pid; |
1395 | 1402 | ||
@@ -1454,7 +1461,8 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, | |||
1454 | if (!unlikely(options & WNOWAIT)) | 1461 | if (!unlikely(options & WNOWAIT)) |
1455 | p->exit_code = 0; | 1462 | p->exit_code = 0; |
1456 | 1463 | ||
1457 | uid = p->uid; | 1464 | /* don't need the RCU readlock here as we're holding a spinlock */ |
1465 | uid = __task_cred(p)->uid; | ||
1458 | unlock_sig: | 1466 | unlock_sig: |
1459 | spin_unlock_irq(&p->sighand->siglock); | 1467 | spin_unlock_irq(&p->sighand->siglock); |
1460 | if (!exit_code) | 1468 | if (!exit_code) |
@@ -1528,10 +1536,10 @@ static int wait_task_continued(struct task_struct *p, int options, | |||
1528 | } | 1536 | } |
1529 | if (!unlikely(options & WNOWAIT)) | 1537 | if (!unlikely(options & WNOWAIT)) |
1530 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; | 1538 | p->signal->flags &= ~SIGNAL_STOP_CONTINUED; |
1539 | uid = __task_cred(p)->uid; | ||
1531 | spin_unlock_irq(&p->sighand->siglock); | 1540 | spin_unlock_irq(&p->sighand->siglock); |
1532 | 1541 | ||
1533 | pid = task_pid_vnr(p); | 1542 | pid = task_pid_vnr(p); |
1534 | uid = p->uid; | ||
1535 | get_task_struct(p); | 1543 | get_task_struct(p); |
1536 | read_unlock(&tasklist_lock); | 1544 | read_unlock(&tasklist_lock); |
1537 | 1545 | ||
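
The exit.c hunks all follow the same access rule: another task's credentials
are reached through __task_cred() and are only stable under rcu_read_lock(),
unless something else pins them (the task is dead in release_task(), or the
siglock is held in wait_task_stopped()). Condensed into a sketch, with an
illustrative helper name:

static uid_t read_task_uid(struct task_struct *p)
{
	uid_t uid;

	rcu_read_lock();
	uid = __task_cred(p)->uid;	/* don't cache the cred past the unlock */
	rcu_read_unlock();
	return uid;
}
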
diff --git a/kernel/extable.c b/kernel/extable.c index a26cb2e17023..feb0317cf09a 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | #include <linux/module.h> | 18 | #include <linux/module.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/ftrace.h> | ||
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include <asm/sections.h> | 22 | #include <asm/sections.h> |
22 | 23 | ||
@@ -40,7 +41,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) | |||
40 | return e; | 41 | return e; |
41 | } | 42 | } |
42 | 43 | ||
43 | int core_kernel_text(unsigned long addr) | 44 | __notrace_funcgraph int core_kernel_text(unsigned long addr) |
44 | { | 45 | { |
45 | if (addr >= (unsigned long)_stext && | 46 | if (addr >= (unsigned long)_stext && |
46 | addr <= (unsigned long)_etext) | 47 | addr <= (unsigned long)_etext) |
@@ -53,7 +54,7 @@ int core_kernel_text(unsigned long addr) | |||
53 | return 0; | 54 | return 0; |
54 | } | 55 | } |
55 | 56 | ||
56 | int __kernel_text_address(unsigned long addr) | 57 | __notrace_funcgraph int __kernel_text_address(unsigned long addr) |
57 | { | 58 | { |
58 | if (core_kernel_text(addr)) | 59 | if (core_kernel_text(addr)) |
59 | return 1; | 60 | return 1; |
diff --git a/kernel/fork.c b/kernel/fork.c index 2a372a0e206f..6144b36cd897 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/mount.h> | 47 | #include <linux/mount.h> |
48 | #include <linux/audit.h> | 48 | #include <linux/audit.h> |
49 | #include <linux/memcontrol.h> | 49 | #include <linux/memcontrol.h> |
50 | #include <linux/ftrace.h> | ||
50 | #include <linux/profile.h> | 51 | #include <linux/profile.h> |
51 | #include <linux/rmap.h> | 52 | #include <linux/rmap.h> |
52 | #include <linux/acct.h> | 53 | #include <linux/acct.h> |
@@ -80,6 +81,8 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0; | |||
80 | 81 | ||
81 | __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ | 82 | __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ |
82 | 83 | ||
84 | DEFINE_TRACE(sched_process_fork); | ||
85 | |||
83 | int nr_processes(void) | 86 | int nr_processes(void) |
84 | { | 87 | { |
85 | int cpu; | 88 | int cpu; |
@@ -137,6 +140,7 @@ void free_task(struct task_struct *tsk) | |||
137 | prop_local_destroy_single(&tsk->dirties); | 140 | prop_local_destroy_single(&tsk->dirties); |
138 | free_thread_info(tsk->stack); | 141 | free_thread_info(tsk->stack); |
139 | rt_mutex_debug_task_free(tsk); | 142 | rt_mutex_debug_task_free(tsk); |
143 | ftrace_graph_exit_task(tsk); | ||
140 | free_task_struct(tsk); | 144 | free_task_struct(tsk); |
141 | } | 145 | } |
142 | EXPORT_SYMBOL(free_task); | 146 | EXPORT_SYMBOL(free_task); |
@@ -147,9 +151,8 @@ void __put_task_struct(struct task_struct *tsk) | |||
147 | WARN_ON(atomic_read(&tsk->usage)); | 151 | WARN_ON(atomic_read(&tsk->usage)); |
148 | WARN_ON(tsk == current); | 152 | WARN_ON(tsk == current); |
149 | 153 | ||
150 | security_task_free(tsk); | 154 | put_cred(tsk->real_cred); |
151 | free_uid(tsk->user); | 155 | put_cred(tsk->cred); |
152 | put_group_info(tsk->group_info); | ||
153 | delayacct_tsk_free(tsk); | 156 | delayacct_tsk_free(tsk); |
154 | 157 | ||
155 | if (!profile_handoff_task(tsk)) | 158 | if (!profile_handoff_task(tsk)) |
@@ -315,17 +318,20 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
315 | file = tmp->vm_file; | 318 | file = tmp->vm_file; |
316 | if (file) { | 319 | if (file) { |
317 | struct inode *inode = file->f_path.dentry->d_inode; | 320 | struct inode *inode = file->f_path.dentry->d_inode; |
321 | struct address_space *mapping = file->f_mapping; | ||
322 | |||
318 | get_file(file); | 323 | get_file(file); |
319 | if (tmp->vm_flags & VM_DENYWRITE) | 324 | if (tmp->vm_flags & VM_DENYWRITE) |
320 | atomic_dec(&inode->i_writecount); | 325 | atomic_dec(&inode->i_writecount); |
321 | 326 | spin_lock(&mapping->i_mmap_lock); | |
322 | /* insert tmp into the share list, just after mpnt */ | 327 | if (tmp->vm_flags & VM_SHARED) |
323 | spin_lock(&file->f_mapping->i_mmap_lock); | 328 | mapping->i_mmap_writable++; |
324 | tmp->vm_truncate_count = mpnt->vm_truncate_count; | 329 | tmp->vm_truncate_count = mpnt->vm_truncate_count; |
325 | flush_dcache_mmap_lock(file->f_mapping); | 330 | flush_dcache_mmap_lock(mapping); |
331 | /* insert tmp into the share list, just after mpnt */ | ||
326 | vma_prio_tree_add(tmp, mpnt); | 332 | vma_prio_tree_add(tmp, mpnt); |
327 | flush_dcache_mmap_unlock(file->f_mapping); | 333 | flush_dcache_mmap_unlock(mapping); |
328 | spin_unlock(&file->f_mapping->i_mmap_lock); | 334 | spin_unlock(&mapping->i_mmap_lock); |
329 | } | 335 | } |
330 | 336 | ||
331 | /* | 337 | /* |
@@ -815,12 +821,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
815 | if (!sig) | 821 | if (!sig) |
816 | return -ENOMEM; | 822 | return -ENOMEM; |
817 | 823 | ||
818 | ret = copy_thread_group_keys(tsk); | ||
819 | if (ret < 0) { | ||
820 | kmem_cache_free(signal_cachep, sig); | ||
821 | return ret; | ||
822 | } | ||
823 | |||
824 | atomic_set(&sig->count, 1); | 824 | atomic_set(&sig->count, 1); |
825 | atomic_set(&sig->live, 1); | 825 | atomic_set(&sig->live, 1); |
826 | init_waitqueue_head(&sig->wait_chldexit); | 826 | init_waitqueue_head(&sig->wait_chldexit); |
@@ -865,7 +865,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
865 | void __cleanup_signal(struct signal_struct *sig) | 865 | void __cleanup_signal(struct signal_struct *sig) |
866 | { | 866 | { |
867 | thread_group_cputime_free(sig); | 867 | thread_group_cputime_free(sig); |
868 | exit_thread_group_keys(sig); | ||
869 | tty_kref_put(sig->tty); | 868 | tty_kref_put(sig->tty); |
870 | kmem_cache_free(signal_cachep, sig); | 869 | kmem_cache_free(signal_cachep, sig); |
871 | } | 870 | } |
@@ -981,16 +980,16 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
981 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); | 980 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
982 | #endif | 981 | #endif |
983 | retval = -EAGAIN; | 982 | retval = -EAGAIN; |
984 | if (atomic_read(&p->user->processes) >= | 983 | if (atomic_read(&p->real_cred->user->processes) >= |
985 | p->signal->rlim[RLIMIT_NPROC].rlim_cur) { | 984 | p->signal->rlim[RLIMIT_NPROC].rlim_cur) { |
986 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && | 985 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && |
987 | p->user != current->nsproxy->user_ns->root_user) | 986 | p->real_cred->user != INIT_USER) |
988 | goto bad_fork_free; | 987 | goto bad_fork_free; |
989 | } | 988 | } |
990 | 989 | ||
991 | atomic_inc(&p->user->__count); | 990 | retval = copy_creds(p, clone_flags); |
992 | atomic_inc(&p->user->processes); | 991 | if (retval < 0) |
993 | get_group_info(p->group_info); | 992 | goto bad_fork_free; |
994 | 993 | ||
995 | /* | 994 | /* |
996 | * If multiple threads are within copy_process(), then this check | 995 | * If multiple threads are within copy_process(), then this check |
@@ -1045,10 +1044,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1045 | do_posix_clock_monotonic_gettime(&p->start_time); | 1044 | do_posix_clock_monotonic_gettime(&p->start_time); |
1046 | p->real_start_time = p->start_time; | 1045 | p->real_start_time = p->start_time; |
1047 | monotonic_to_bootbased(&p->real_start_time); | 1046 | monotonic_to_bootbased(&p->real_start_time); |
1048 | #ifdef CONFIG_SECURITY | ||
1049 | p->security = NULL; | ||
1050 | #endif | ||
1051 | p->cap_bset = current->cap_bset; | ||
1052 | p->io_context = NULL; | 1047 | p->io_context = NULL; |
1053 | p->audit_context = NULL; | 1048 | p->audit_context = NULL; |
1054 | cgroup_fork(p); | 1049 | cgroup_fork(p); |
@@ -1089,14 +1084,14 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1089 | #ifdef CONFIG_DEBUG_MUTEXES | 1084 | #ifdef CONFIG_DEBUG_MUTEXES |
1090 | p->blocked_on = NULL; /* not blocked yet */ | 1085 | p->blocked_on = NULL; /* not blocked yet */ |
1091 | #endif | 1086 | #endif |
1087 | if (unlikely(ptrace_reparented(current))) | ||
1088 | ptrace_fork(p, clone_flags); | ||
1092 | 1089 | ||
1093 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1090 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
1094 | sched_fork(p, clone_flags); | 1091 | sched_fork(p, clone_flags); |
1095 | 1092 | ||
1096 | if ((retval = security_task_alloc(p))) | ||
1097 | goto bad_fork_cleanup_policy; | ||
1098 | if ((retval = audit_alloc(p))) | 1093 | if ((retval = audit_alloc(p))) |
1099 | goto bad_fork_cleanup_security; | 1094 | goto bad_fork_cleanup_policy; |
1100 | /* copy all the process information */ | 1095 | /* copy all the process information */ |
1101 | if ((retval = copy_semundo(clone_flags, p))) | 1096 | if ((retval = copy_semundo(clone_flags, p))) |
1102 | goto bad_fork_cleanup_audit; | 1097 | goto bad_fork_cleanup_audit; |
@@ -1110,10 +1105,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1110 | goto bad_fork_cleanup_sighand; | 1105 | goto bad_fork_cleanup_sighand; |
1111 | if ((retval = copy_mm(clone_flags, p))) | 1106 | if ((retval = copy_mm(clone_flags, p))) |
1112 | goto bad_fork_cleanup_signal; | 1107 | goto bad_fork_cleanup_signal; |
1113 | if ((retval = copy_keys(clone_flags, p))) | ||
1114 | goto bad_fork_cleanup_mm; | ||
1115 | if ((retval = copy_namespaces(clone_flags, p))) | 1108 | if ((retval = copy_namespaces(clone_flags, p))) |
1116 | goto bad_fork_cleanup_keys; | 1109 | goto bad_fork_cleanup_mm; |
1117 | if ((retval = copy_io(clone_flags, p))) | 1110 | if ((retval = copy_io(clone_flags, p))) |
1118 | goto bad_fork_cleanup_namespaces; | 1111 | goto bad_fork_cleanup_namespaces; |
1119 | retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); | 1112 | retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); |
@@ -1133,6 +1126,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1133 | } | 1126 | } |
1134 | } | 1127 | } |
1135 | 1128 | ||
1129 | ftrace_graph_init_task(p); | ||
1130 | |||
1136 | p->pid = pid_nr(pid); | 1131 | p->pid = pid_nr(pid); |
1137 | p->tgid = p->pid; | 1132 | p->tgid = p->pid; |
1138 | if (clone_flags & CLONE_THREAD) | 1133 | if (clone_flags & CLONE_THREAD) |
@@ -1141,7 +1136,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1141 | if (current->nsproxy != p->nsproxy) { | 1136 | if (current->nsproxy != p->nsproxy) { |
1142 | retval = ns_cgroup_clone(p, pid); | 1137 | retval = ns_cgroup_clone(p, pid); |
1143 | if (retval) | 1138 | if (retval) |
1144 | goto bad_fork_free_pid; | 1139 | goto bad_fork_free_graph; |
1145 | } | 1140 | } |
1146 | 1141 | ||
1147 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | 1142 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
@@ -1234,7 +1229,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1234 | spin_unlock(&current->sighand->siglock); | 1229 | spin_unlock(&current->sighand->siglock);
1235 | write_unlock_irq(&tasklist_lock); | 1230 | write_unlock_irq(&tasklist_lock); |
1236 | retval = -ERESTARTNOINTR; | 1231 | retval = -ERESTARTNOINTR; |
1237 | goto bad_fork_free_pid; | 1232 | goto bad_fork_free_graph; |
1238 | } | 1233 | } |
1239 | 1234 | ||
1240 | if (clone_flags & CLONE_THREAD) { | 1235 | if (clone_flags & CLONE_THREAD) { |
@@ -1271,6 +1266,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1271 | cgroup_post_fork(p); | 1266 | cgroup_post_fork(p); |
1272 | return p; | 1267 | return p; |
1273 | 1268 | ||
1269 | bad_fork_free_graph: | ||
1270 | ftrace_graph_exit_task(p); | ||
1274 | bad_fork_free_pid: | 1271 | bad_fork_free_pid: |
1275 | if (pid != &init_struct_pid) | 1272 | if (pid != &init_struct_pid) |
1276 | free_pid(pid); | 1273 | free_pid(pid); |
@@ -1278,8 +1275,6 @@ bad_fork_cleanup_io: | |||
1278 | put_io_context(p->io_context); | 1275 | put_io_context(p->io_context); |
1279 | bad_fork_cleanup_namespaces: | 1276 | bad_fork_cleanup_namespaces: |
1280 | exit_task_namespaces(p); | 1277 | exit_task_namespaces(p); |
1281 | bad_fork_cleanup_keys: | ||
1282 | exit_keys(p); | ||
1283 | bad_fork_cleanup_mm: | 1278 | bad_fork_cleanup_mm: |
1284 | if (p->mm) | 1279 | if (p->mm) |
1285 | mmput(p->mm); | 1280 | mmput(p->mm); |
@@ -1295,8 +1290,6 @@ bad_fork_cleanup_semundo: | |||
1295 | exit_sem(p); | 1290 | exit_sem(p); |
1296 | bad_fork_cleanup_audit: | 1291 | bad_fork_cleanup_audit: |
1297 | audit_free(p); | 1292 | audit_free(p); |
1298 | bad_fork_cleanup_security: | ||
1299 | security_task_free(p); | ||
1300 | bad_fork_cleanup_policy: | 1293 | bad_fork_cleanup_policy: |
1301 | #ifdef CONFIG_NUMA | 1294 | #ifdef CONFIG_NUMA |
1302 | mpol_put(p->mempolicy); | 1295 | mpol_put(p->mempolicy); |
@@ -1309,9 +1302,9 @@ bad_fork_cleanup_cgroup: | |||
1309 | bad_fork_cleanup_put_domain: | 1302 | bad_fork_cleanup_put_domain: |
1310 | module_put(task_thread_info(p)->exec_domain->module); | 1303 | module_put(task_thread_info(p)->exec_domain->module); |
1311 | bad_fork_cleanup_count: | 1304 | bad_fork_cleanup_count: |
1312 | put_group_info(p->group_info); | 1305 | atomic_dec(&p->cred->user->processes); |
1313 | atomic_dec(&p->user->processes); | 1306 | put_cred(p->real_cred); |
1314 | free_uid(p->user); | 1307 | put_cred(p->cred); |
1315 | bad_fork_free: | 1308 | bad_fork_free: |
1316 | free_task(p); | 1309 | free_task(p); |
1317 | fork_out: | 1310 | fork_out: |
@@ -1355,6 +1348,21 @@ long do_fork(unsigned long clone_flags, | |||
1355 | long nr; | 1348 | long nr; |
1356 | 1349 | ||
1357 | /* | 1350 | /* |
1351 | * Do some preliminary argument and permissions checking before we | ||
1352 | * actually start allocating stuff | ||
1353 | */ | ||
1354 | if (clone_flags & CLONE_NEWUSER) { | ||
1355 | if (clone_flags & CLONE_THREAD) | ||
1356 | return -EINVAL; | ||
1357 | /* hopefully this check will go away when userns support is | ||
1358 | * complete | ||
1359 | */ | ||
1360 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) || | ||
1361 | !capable(CAP_SETGID)) | ||
1362 | return -EPERM; | ||
1363 | } | ||
1364 | |||
1365 | /* | ||
1358 | * We hope to recycle these flags after 2.6.26 | 1366 | * We hope to recycle these flags after 2.6.26 |
1359 | */ | 1367 | */ |
1360 | if (unlikely(clone_flags & CLONE_STOPPED)) { | 1368 | if (unlikely(clone_flags & CLONE_STOPPED)) { |
@@ -1398,6 +1406,7 @@ long do_fork(unsigned long clone_flags, | |||
1398 | init_completion(&vfork); | 1406 | init_completion(&vfork); |
1399 | } | 1407 | } |
1400 | 1408 | ||
1409 | audit_finish_fork(p); | ||
1401 | tracehook_report_clone(trace, regs, clone_flags, nr, p); | 1410 | tracehook_report_clone(trace, regs, clone_flags, nr, p); |
1402 | 1411 | ||
1403 | /* | 1412 | /* |
@@ -1601,8 +1610,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) | |||
1601 | err = -EINVAL; | 1610 | err = -EINVAL; |
1602 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| | 1611 | if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND| |
1603 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| | 1612 | CLONE_VM|CLONE_FILES|CLONE_SYSVSEM| |
1604 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER| | 1613 | CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET)) |
1605 | CLONE_NEWNET)) | ||
1606 | goto bad_unshare_out; | 1614 | goto bad_unshare_out; |
1607 | 1615 | ||
1608 | /* | 1616 | /* |
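
The new do_fork() precheck is visible from userspace: clone(CLONE_NEWUSER)
without CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID now fails with EPERM, and
combining it with CLONE_THREAD fails with EINVAL. A small hypothetical test,
assuming a stack-grows-down architecture:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int child_fn(void *arg)
{
	return 0;
}

int main(void)
{
	static char stack[16384];

	/* run unprivileged: expect "Operation not permitted" after this patch */
	if (clone(child_fn, stack + sizeof(stack),
		  CLONE_NEWUSER | SIGCHLD, NULL) == -1)
		printf("clone(CLONE_NEWUSER): %s\n", strerror(errno));
	return 0;
}
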
diff --git a/kernel/futex.c b/kernel/futex.c index 8af10027514b..4fe790e89d0f 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -439,13 +439,20 @@ static void free_pi_state(struct futex_pi_state *pi_state) | |||
439 | static struct task_struct * futex_find_get_task(pid_t pid) | 439 | static struct task_struct * futex_find_get_task(pid_t pid) |
440 | { | 440 | { |
441 | struct task_struct *p; | 441 | struct task_struct *p; |
442 | const struct cred *cred = current_cred(), *pcred; | ||
442 | 443 | ||
443 | rcu_read_lock(); | 444 | rcu_read_lock(); |
444 | p = find_task_by_vpid(pid); | 445 | p = find_task_by_vpid(pid); |
445 | if (!p || ((current->euid != p->euid) && (current->euid != p->uid))) | 446 | if (!p) { |
446 | p = ERR_PTR(-ESRCH); | 447 | p = ERR_PTR(-ESRCH); |
447 | else | 448 | } else { |
448 | get_task_struct(p); | 449 | pcred = __task_cred(p); |
450 | if (cred->euid != pcred->euid && | ||
451 | cred->euid != pcred->uid) | ||
452 | p = ERR_PTR(-ESRCH); | ||
453 | else | ||
454 | get_task_struct(p); | ||
455 | } | ||
449 | 456 | ||
450 | rcu_read_unlock(); | 457 | rcu_read_unlock(); |
451 | 458 | ||
@@ -1829,6 +1836,7 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, | |||
1829 | { | 1836 | { |
1830 | struct robust_list_head __user *head; | 1837 | struct robust_list_head __user *head; |
1831 | unsigned long ret; | 1838 | unsigned long ret; |
1839 | const struct cred *cred = current_cred(), *pcred; | ||
1832 | 1840 | ||
1833 | if (!futex_cmpxchg_enabled) | 1841 | if (!futex_cmpxchg_enabled) |
1834 | return -ENOSYS; | 1842 | return -ENOSYS; |
@@ -1844,8 +1852,10 @@ sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr, | |||
1844 | if (!p) | 1852 | if (!p) |
1845 | goto err_unlock; | 1853 | goto err_unlock; |
1846 | ret = -EPERM; | 1854 | ret = -EPERM; |
1847 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 1855 | pcred = __task_cred(p); |
1848 | !capable(CAP_SYS_PTRACE)) | 1856 | if (cred->euid != pcred->euid && |
1857 | cred->euid != pcred->uid && | ||
1858 | !capable(CAP_SYS_PTRACE)) | ||
1849 | goto err_unlock; | 1859 | goto err_unlock; |
1850 | head = p->robust_list; | 1860 | head = p->robust_list; |
1851 | rcu_read_unlock(); | 1861 | rcu_read_unlock(); |
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c index 04ac3a9e42cf..d607a5b9ee29 100644 --- a/kernel/futex_compat.c +++ b/kernel/futex_compat.c | |||
@@ -135,6 +135,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, | |||
135 | { | 135 | { |
136 | struct compat_robust_list_head __user *head; | 136 | struct compat_robust_list_head __user *head; |
137 | unsigned long ret; | 137 | unsigned long ret; |
138 | const struct cred *cred = current_cred(), *pcred; | ||
138 | 139 | ||
139 | if (!futex_cmpxchg_enabled) | 140 | if (!futex_cmpxchg_enabled) |
140 | return -ENOSYS; | 141 | return -ENOSYS; |
@@ -150,8 +151,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, | |||
150 | if (!p) | 151 | if (!p) |
151 | goto err_unlock; | 152 | goto err_unlock; |
152 | ret = -EPERM; | 153 | ret = -EPERM; |
153 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 154 | pcred = __task_cred(p); |
154 | !capable(CAP_SYS_PTRACE)) | 155 | if (cred->euid != pcred->euid && |
156 | cred->euid != pcred->uid && | ||
157 | !capable(CAP_SYS_PTRACE)) | ||
155 | goto err_unlock; | 158 | goto err_unlock; |
156 | head = p->compat_robust_list; | 159 | head = p->compat_robust_list; |
157 | read_unlock(&tasklist_lock); | 160 | read_unlock(&tasklist_lock); |
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index 7b8b0f21a5b1..e694afa0eb8c 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
@@ -30,20 +30,19 @@ | |||
30 | #define all_var 0 | 30 | #define all_var 0 |
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | /* These will be re-linked against their real values during the second link stage */ | 33 | extern const unsigned long kallsyms_addresses[]; |
34 | extern const unsigned long kallsyms_addresses[] __attribute__((weak)); | 34 | extern const u8 kallsyms_names[]; |
35 | extern const u8 kallsyms_names[] __attribute__((weak)); | ||
36 | 35 | ||
37 | /* tell the compiler that the count isn't in the small data section if the arch | 36 | /* tell the compiler that the count isn't in the small data section if the arch |
38 | * has one (eg: FRV) | 37 | * has one (eg: FRV) |
39 | */ | 38 | */ |
40 | extern const unsigned long kallsyms_num_syms | 39 | extern const unsigned long kallsyms_num_syms |
41 | __attribute__((weak, section(".rodata"))); | 40 | __attribute__((__section__(".rodata"))); |
42 | 41 | ||
43 | extern const u8 kallsyms_token_table[] __attribute__((weak)); | 42 | extern const u8 kallsyms_token_table[]; |
44 | extern const u16 kallsyms_token_index[] __attribute__((weak)); | 43 | extern const u16 kallsyms_token_index[]; |
45 | 44 | ||
46 | extern const unsigned long kallsyms_markers[] __attribute__((weak)); | 45 | extern const unsigned long kallsyms_markers[]; |
47 | 46 | ||
48 | static inline int is_kernel_inittext(unsigned long addr) | 47 | static inline int is_kernel_inittext(unsigned long addr) |
49 | { | 48 | { |
@@ -168,9 +167,6 @@ static unsigned long get_symbol_pos(unsigned long addr, | |||
168 | unsigned long symbol_start = 0, symbol_end = 0; | 167 | unsigned long symbol_start = 0, symbol_end = 0; |
169 | unsigned long i, low, high, mid; | 168 | unsigned long i, low, high, mid; |
170 | 169 | ||
171 | /* This kernel should never had been booted. */ | ||
172 | BUG_ON(!kallsyms_addresses); | ||
173 | |||
174 | /* do a binary search on the sorted kallsyms_addresses array */ | 170 | /* do a binary search on the sorted kallsyms_addresses array */ |
175 | low = 0; | 171 | low = 0; |
176 | high = kallsyms_num_syms; | 172 | high = kallsyms_num_syms; |
diff --git a/kernel/kmod.c b/kernel/kmod.c index 3d3c3ea3a023..b46dbb908669 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c | |||
@@ -118,10 +118,10 @@ EXPORT_SYMBOL(request_module); | |||
118 | struct subprocess_info { | 118 | struct subprocess_info { |
119 | struct work_struct work; | 119 | struct work_struct work; |
120 | struct completion *complete; | 120 | struct completion *complete; |
121 | struct cred *cred; | ||
121 | char *path; | 122 | char *path; |
122 | char **argv; | 123 | char **argv; |
123 | char **envp; | 124 | char **envp; |
124 | struct key *ring; | ||
125 | enum umh_wait wait; | 125 | enum umh_wait wait; |
126 | int retval; | 126 | int retval; |
127 | struct file *stdin; | 127 | struct file *stdin; |
@@ -134,19 +134,20 @@ struct subprocess_info { | |||
134 | static int ____call_usermodehelper(void *data) | 134 | static int ____call_usermodehelper(void *data) |
135 | { | 135 | { |
136 | struct subprocess_info *sub_info = data; | 136 | struct subprocess_info *sub_info = data; |
137 | struct key *new_session, *old_session; | ||
138 | int retval; | 137 | int retval; |
139 | 138 | ||
140 | /* Unblock all signals and set the session keyring. */ | 139 | BUG_ON(atomic_read(&sub_info->cred->usage) != 1); |
141 | new_session = key_get(sub_info->ring); | 140 | |
141 | /* Unblock all signals */ | ||
141 | spin_lock_irq(&current->sighand->siglock); | 142 | spin_lock_irq(&current->sighand->siglock);
143 | old_session = __install_session_keyring(current, new_session); | ||
144 | flush_signal_handlers(current, 1); | 143 | flush_signal_handlers(current, 1); |
145 | sigemptyset(¤t->blocked); | 144 | sigemptyset(¤t->blocked); |
146 | recalc_sigpending(); | 145 | recalc_sigpending(); |
147 | spin_unlock_irq(&current->sighand->siglock); | 146 | spin_unlock_irq(&current->sighand->siglock);
148 | 147 | ||
149 | key_put(old_session); | 148 | /* Install the credentials */ |
149 | commit_creds(sub_info->cred); | ||
150 | sub_info->cred = NULL; | ||
150 | 151 | ||
151 | /* Install input pipe when needed */ | 152 | /* Install input pipe when needed */ |
152 | if (sub_info->stdin) { | 153 | if (sub_info->stdin) { |
@@ -185,6 +186,8 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info) | |||
185 | { | 186 | { |
186 | if (info->cleanup) | 187 | if (info->cleanup) |
187 | (*info->cleanup)(info->argv, info->envp); | 188 | (*info->cleanup)(info->argv, info->envp); |
189 | if (info->cred) | ||
190 | put_cred(info->cred); | ||
188 | kfree(info); | 191 | kfree(info); |
189 | } | 192 | } |
190 | EXPORT_SYMBOL(call_usermodehelper_freeinfo); | 193 | EXPORT_SYMBOL(call_usermodehelper_freeinfo); |
@@ -240,6 +243,8 @@ static void __call_usermodehelper(struct work_struct *work) | |||
240 | pid_t pid; | 243 | pid_t pid; |
241 | enum umh_wait wait = sub_info->wait; | 244 | enum umh_wait wait = sub_info->wait; |
242 | 245 | ||
246 | BUG_ON(atomic_read(&sub_info->cred->usage) != 1); | ||
247 | |||
243 | /* CLONE_VFORK: wait until the usermode helper has execve'd | 248 | /* CLONE_VFORK: wait until the usermode helper has execve'd |
244 | * successfully. We need the data structures to stay around | 249 | * successfully. We need the data structures to stay around
245 | * until that is done. */ | 250 | * until that is done. */ |
@@ -362,6 +367,9 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, | |||
362 | sub_info->path = path; | 367 | sub_info->path = path; |
363 | sub_info->argv = argv; | 368 | sub_info->argv = argv; |
364 | sub_info->envp = envp; | 369 | sub_info->envp = envp; |
370 | sub_info->cred = prepare_usermodehelper_creds(); | ||
371 | if (!sub_info->cred) | ||
372 | return NULL; | ||
365 | 373 | ||
366 | out: | 374 | out: |
367 | return sub_info; | 375 | return sub_info; |
@@ -376,7 +384,13 @@ EXPORT_SYMBOL(call_usermodehelper_setup); | |||
376 | void call_usermodehelper_setkeys(struct subprocess_info *info, | 384 | void call_usermodehelper_setkeys(struct subprocess_info *info, |
377 | struct key *session_keyring) | 385 | struct key *session_keyring) |
378 | { | 386 | { |
379 | info->ring = session_keyring; | 387 | #ifdef CONFIG_KEYS |
388 | struct thread_group_cred *tgcred = info->cred->tgcred; | ||
389 | key_put(tgcred->session_keyring); | ||
390 | tgcred->session_keyring = key_get(session_keyring); | ||
391 | #else | ||
392 | BUG(); | ||
393 | #endif | ||
380 | } | 394 | } |
381 | EXPORT_SYMBOL(call_usermodehelper_setkeys); | 395 | EXPORT_SYMBOL(call_usermodehelper_setkeys); |
382 | 396 | ||
@@ -444,6 +458,8 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, | |||
444 | DECLARE_COMPLETION_ONSTACK(done); | 458 | DECLARE_COMPLETION_ONSTACK(done); |
445 | int retval = 0; | 459 | int retval = 0; |
446 | 460 | ||
461 | BUG_ON(atomic_read(&sub_info->cred->usage) != 1); | ||
462 | |||
447 | helper_lock(); | 463 | helper_lock(); |
448 | if (sub_info->path[0] == '\0') | 464 | if (sub_info->path[0] == '\0') |
449 | goto out; | 465 | goto out; |
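
With info->ring replaced by info->cred, the call sequence for usermode-helper
users is unchanged; a sketch of the flow, where the helper path is hypothetical
and the gfp argument to setup() is assumed from this era's API rather than
shown in the hunks above:

static int run_helper(struct key *session_keyring)
{
	char *argv[] = { "/sbin/example-helper", NULL };
	char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
	struct subprocess_info *info;

	info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL);
	if (!info)	/* setup() now also fails if the creds can't be allocated */
		return -ENOMEM;

	/* stores the keyring in info->cred->tgcred rather than info->ring */
	call_usermodehelper_setkeys(info, session_keyring);

	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}
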
diff --git a/kernel/kthread.c b/kernel/kthread.c index 8e7a7ce3ed0a..4fbc456f393d 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -21,6 +21,9 @@ static DEFINE_SPINLOCK(kthread_create_lock); | |||
21 | static LIST_HEAD(kthread_create_list); | 21 | static LIST_HEAD(kthread_create_list); |
22 | struct task_struct *kthreadd_task; | 22 | struct task_struct *kthreadd_task; |
23 | 23 | ||
24 | DEFINE_TRACE(sched_kthread_stop); | ||
25 | DEFINE_TRACE(sched_kthread_stop_ret); | ||
26 | |||
24 | struct kthread_create_info | 27 | struct kthread_create_info |
25 | { | 28 | { |
26 | /* Information passed to kthread() from kthreadd. */ | 29 | /* Information passed to kthread() from kthreadd. */ |
diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 5e7b45c56923..449db466bdbc 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c | |||
@@ -191,7 +191,7 @@ static int lstats_show(struct seq_file *m, void *v) | |||
191 | latency_record[i].time, | 191 | latency_record[i].time, |
192 | latency_record[i].max); | 192 | latency_record[i].max); |
193 | for (q = 0; q < LT_BACKTRACEDEPTH; q++) { | 193 | for (q = 0; q < LT_BACKTRACEDEPTH; q++) { |
194 | char sym[KSYM_NAME_LEN]; | 194 | char sym[KSYM_SYMBOL_LEN]; |
195 | char *c; | 195 | char *c; |
196 | if (!latency_record[i].backtrace[q]) | 196 | if (!latency_record[i].backtrace[q]) |
197 | break; | 197 | break; |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 46a404173db2..74b1878b8bb8 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
@@ -25,6 +25,7 @@ | |||
25 | * Thanks to Arjan van de Ven for coming up with the initial idea of | 25 | * Thanks to Arjan van de Ven for coming up with the initial idea of |
26 | * mapping lock dependencies runtime. | 26 | * mapping lock dependencies runtime. |
27 | */ | 27 | */ |
28 | #define DISABLE_BRANCH_PROFILING | ||
28 | #include <linux/mutex.h> | 29 | #include <linux/mutex.h> |
29 | #include <linux/sched.h> | 30 | #include <linux/sched.h> |
30 | #include <linux/delay.h> | 31 | #include <linux/delay.h> |
diff --git a/kernel/marker.c b/kernel/marker.c index e9c6b2bc9400..ea54f2647868 100644 --- a/kernel/marker.c +++ b/kernel/marker.c | |||
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(markers_mutex); | |||
43 | */ | 43 | */ |
44 | #define MARKER_HASH_BITS 6 | 44 | #define MARKER_HASH_BITS 6 |
45 | #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS) | 45 | #define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS) |
46 | static struct hlist_head marker_table[MARKER_TABLE_SIZE]; | ||
46 | 47 | ||
47 | /* | 48 | /* |
48 | * Note about RCU : | 49 | * Note about RCU : |
@@ -64,11 +65,10 @@ struct marker_entry { | |||
64 | void *oldptr; | 65 | void *oldptr; |
65 | int rcu_pending; | 66 | int rcu_pending; |
66 | unsigned char ptype:1; | 67 | unsigned char ptype:1; |
68 | unsigned char format_allocated:1; | ||
67 | char name[0]; /* Contains name'\0'format'\0' */ | 69 | char name[0]; /* Contains name'\0'format'\0' */ |
68 | }; | 70 | }; |
69 | 71 | ||
70 | static struct hlist_head marker_table[MARKER_TABLE_SIZE]; | ||
71 | |||
72 | /** | 72 | /** |
73 | * __mark_empty_function - Empty probe callback | 73 | * __mark_empty_function - Empty probe callback |
74 | * @probe_private: probe private data | 74 | * @probe_private: probe private data |
@@ -81,7 +81,7 @@ static struct hlist_head marker_table[MARKER_TABLE_SIZE]; | |||
81 | * though the function pointer change and the marker enabling are two distinct | 81 | * though the function pointer change and the marker enabling are two distinct |
82 | * operations that modify the execution flow of preemptible code. | 82 | * operations that modify the execution flow of preemptible code. |
83 | */ | 83 | */ |
84 | void __mark_empty_function(void *probe_private, void *call_private, | 84 | notrace void __mark_empty_function(void *probe_private, void *call_private, |
85 | const char *fmt, va_list *args) | 85 | const char *fmt, va_list *args) |
86 | { | 86 | { |
87 | } | 87 | } |
@@ -97,7 +97,8 @@ EXPORT_SYMBOL_GPL(__mark_empty_function); | |||
97 | * need to put a full smp_rmb() in this branch. This is why we do not use | 97 | * need to put a full smp_rmb() in this branch. This is why we do not use |
98 | * rcu_dereference() for the pointer read. | 98 | * rcu_dereference() for the pointer read. |
99 | */ | 99 | */ |
100 | void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | 100 | notrace void marker_probe_cb(const struct marker *mdata, |
101 | void *call_private, ...) | ||
101 | { | 102 | { |
102 | va_list args; | 103 | va_list args; |
103 | char ptype; | 104 | char ptype; |
@@ -107,7 +108,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | |||
107 | * sure the teardown of the callbacks can be done correctly when they | 108 | * sure the teardown of the callbacks can be done correctly when they |
108 | * are in modules and they ensure RCU read coherency. | 109 | * are in modules and they ensure RCU read coherency. |
109 | */ | 110 | */ |
110 | rcu_read_lock_sched(); | 111 | rcu_read_lock_sched_notrace(); |
111 | ptype = mdata->ptype; | 112 | ptype = mdata->ptype; |
112 | if (likely(!ptype)) { | 113 | if (likely(!ptype)) { |
113 | marker_probe_func *func; | 114 | marker_probe_func *func; |
@@ -145,7 +146,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...) | |||
145 | va_end(args); | 146 | va_end(args); |
146 | } | 147 | } |
147 | } | 148 | } |
148 | rcu_read_unlock_sched(); | 149 | rcu_read_unlock_sched_notrace(); |
149 | } | 150 | } |
150 | EXPORT_SYMBOL_GPL(marker_probe_cb); | 151 | EXPORT_SYMBOL_GPL(marker_probe_cb); |
151 | 152 | ||
@@ -157,12 +158,13 @@ EXPORT_SYMBOL_GPL(marker_probe_cb); | |||
157 | * | 158 | * |
158 | * Should be connected to markers "MARK_NOARGS". | 159 | * Should be connected to markers "MARK_NOARGS". |
159 | */ | 160 | */ |
160 | void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...) | 161 | static notrace void marker_probe_cb_noarg(const struct marker *mdata, |
162 | void *call_private, ...) | ||
161 | { | 163 | { |
162 | va_list args; /* not initialized */ | 164 | va_list args; /* not initialized */ |
163 | char ptype; | 165 | char ptype; |
164 | 166 | ||
165 | rcu_read_lock_sched(); | 167 | rcu_read_lock_sched_notrace(); |
166 | ptype = mdata->ptype; | 168 | ptype = mdata->ptype; |
167 | if (likely(!ptype)) { | 169 | if (likely(!ptype)) { |
168 | marker_probe_func *func; | 170 | marker_probe_func *func; |
@@ -195,9 +197,8 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...) | |||
195 | multi[i].func(multi[i].probe_private, call_private, | 197 | multi[i].func(multi[i].probe_private, call_private, |
196 | mdata->format, &args); | 198 | mdata->format, &args); |
197 | } | 199 | } |
198 | rcu_read_unlock_sched(); | 200 | rcu_read_unlock_sched_notrace(); |
199 | } | 201 | } |
200 | EXPORT_SYMBOL_GPL(marker_probe_cb_noarg); | ||
201 | 202 | ||
202 | static void free_old_closure(struct rcu_head *head) | 203 | static void free_old_closure(struct rcu_head *head) |
203 | { | 204 | { |
@@ -416,6 +417,7 @@ static struct marker_entry *add_marker(const char *name, const char *format) | |||
416 | e->single.probe_private = NULL; | 417 | e->single.probe_private = NULL; |
417 | e->multi = NULL; | 418 | e->multi = NULL; |
418 | e->ptype = 0; | 419 | e->ptype = 0; |
420 | e->format_allocated = 0; | ||
419 | e->refcount = 0; | 421 | e->refcount = 0; |
420 | e->rcu_pending = 0; | 422 | e->rcu_pending = 0; |
421 | hlist_add_head(&e->hlist, head); | 423 | hlist_add_head(&e->hlist, head); |
@@ -447,6 +449,8 @@ static int remove_marker(const char *name) | |||
447 | if (e->single.func != __mark_empty_function) | 449 | if (e->single.func != __mark_empty_function) |
448 | return -EBUSY; | 450 | return -EBUSY; |
449 | hlist_del(&e->hlist); | 451 | hlist_del(&e->hlist); |
452 | if (e->format_allocated) | ||
453 | kfree(e->format); | ||
450 | /* Make sure the call_rcu has been executed */ | 454 | /* Make sure the call_rcu has been executed */ |
451 | if (e->rcu_pending) | 455 | if (e->rcu_pending) |
452 | rcu_barrier_sched(); | 456 | rcu_barrier_sched(); |
@@ -457,57 +461,34 @@ static int remove_marker(const char *name) | |||
457 | /* | 461 | /* |
458 | * Set the mark_entry format to the format found in the element. | 462 | * Set the mark_entry format to the format found in the element. |
459 | */ | 463 | */ |
460 | static int marker_set_format(struct marker_entry **entry, const char *format) | 464 | static int marker_set_format(struct marker_entry *entry, const char *format) |
461 | { | 465 | { |
462 | struct marker_entry *e; | 466 | entry->format = kstrdup(format, GFP_KERNEL); |
463 | size_t name_len = strlen((*entry)->name) + 1; | 467 | if (!entry->format) |
464 | size_t format_len = strlen(format) + 1; | ||
465 | |||
466 | |||
467 | e = kmalloc(sizeof(struct marker_entry) + name_len + format_len, | ||
468 | GFP_KERNEL); | ||
469 | if (!e) | ||
470 | return -ENOMEM; | 468 | return -ENOMEM; |
471 | memcpy(&e->name[0], (*entry)->name, name_len); | 469 | entry->format_allocated = 1; |
472 | e->format = &e->name[name_len]; | 470 | |
473 | memcpy(e->format, format, format_len); | ||
474 | if (strcmp(e->format, MARK_NOARGS) == 0) | ||
475 | e->call = marker_probe_cb_noarg; | ||
476 | else | ||
477 | e->call = marker_probe_cb; | ||
478 | e->single = (*entry)->single; | ||
479 | e->multi = (*entry)->multi; | ||
480 | e->ptype = (*entry)->ptype; | ||
481 | e->refcount = (*entry)->refcount; | ||
482 | e->rcu_pending = 0; | ||
483 | hlist_add_before(&e->hlist, &(*entry)->hlist); | ||
484 | hlist_del(&(*entry)->hlist); | ||
485 | /* Make sure the call_rcu has been executed */ | ||
486 | if ((*entry)->rcu_pending) | ||
487 | rcu_barrier_sched(); | ||
488 | kfree(*entry); | ||
489 | *entry = e; | ||
490 | trace_mark(core_marker_format, "name %s format %s", | 471 | trace_mark(core_marker_format, "name %s format %s", |
491 | e->name, e->format); | 472 | entry->name, entry->format); |
492 | return 0; | 473 | return 0; |
493 | } | 474 | } |
494 | 475 | ||
495 | /* | 476 | /* |
496 | * Sets the probe callback corresponding to one marker. | 477 | * Sets the probe callback corresponding to one marker. |
497 | */ | 478 | */ |
498 | static int set_marker(struct marker_entry **entry, struct marker *elem, | 479 | static int set_marker(struct marker_entry *entry, struct marker *elem, |
499 | int active) | 480 | int active) |
500 | { | 481 | { |
501 | int ret; | 482 | int ret = 0; |
502 | WARN_ON(strcmp((*entry)->name, elem->name) != 0); | 483 | WARN_ON(strcmp(entry->name, elem->name) != 0); |
503 | 484 | ||
504 | if ((*entry)->format) { | 485 | if (entry->format) { |
505 | if (strcmp((*entry)->format, elem->format) != 0) { | 486 | if (strcmp(entry->format, elem->format) != 0) { |
506 | printk(KERN_NOTICE | 487 | printk(KERN_NOTICE |
507 | "Format mismatch for probe %s " | 488 | "Format mismatch for probe %s " |
508 | "(%s), marker (%s)\n", | 489 | "(%s), marker (%s)\n", |
509 | (*entry)->name, | 490 | entry->name, |
510 | (*entry)->format, | 491 | entry->format, |
511 | elem->format); | 492 | elem->format); |
512 | return -EPERM; | 493 | return -EPERM; |
513 | } | 494 | } |
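Note: the rewrite above retires the old scheme of allocating a second marker_entry and splicing it into the hash chain just to attach a format string. Now the format is simply duplicated, and the new format_allocated bit tells remove_marker() to kfree() it. kstrdup() itself is plain allocate-and-copy, sketched from mm/util.c:

    char *kstrdup(const char *s, gfp_t gfp)
    {
            size_t len;
            char *buf;

            if (!s)
                    return NULL;
            len = strlen(s) + 1;            /* include the trailing NUL */
            buf = kmalloc(len, gfp);
            if (buf)
                    memcpy(buf, s, len);
            return buf;
    }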
@@ -523,37 +504,67 @@ static int set_marker(struct marker_entry **entry, struct marker *elem, | |||
523 | * pass from a "safe" callback (with argument) to an "unsafe" | 504 | * pass from a "safe" callback (with argument) to an "unsafe" |
524 | * callback (does not set arguments). | 505 | * callback (does not set arguments). |
525 | */ | 506 | */ |
526 | elem->call = (*entry)->call; | 507 | elem->call = entry->call; |
527 | /* | 508 | /* |
528 | * Sanity check : | 509 | * Sanity check : |
529 | * We only update the single probe private data when the ptr is | 510 | * We only update the single probe private data when the ptr is |
530 | * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1) | 511 | * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1) |
531 | */ | 512 | */ |
532 | WARN_ON(elem->single.func != __mark_empty_function | 513 | WARN_ON(elem->single.func != __mark_empty_function |
533 | && elem->single.probe_private | 514 | && elem->single.probe_private != entry->single.probe_private |
534 | != (*entry)->single.probe_private && | 515 | && !elem->ptype); |
535 | !elem->ptype); | 516 | elem->single.probe_private = entry->single.probe_private; |
536 | elem->single.probe_private = (*entry)->single.probe_private; | ||
537 | /* | 517 | /* |
538 | * Make sure the private data is valid when we update the | 518 | * Make sure the private data is valid when we update the |
539 | * single probe ptr. | 519 | * single probe ptr. |
540 | */ | 520 | */ |
541 | smp_wmb(); | 521 | smp_wmb(); |
542 | elem->single.func = (*entry)->single.func; | 522 | elem->single.func = entry->single.func; |
543 | /* | 523 | /* |
544 | * We also make sure that the new probe callbacks array is consistent | 524 | * We also make sure that the new probe callbacks array is consistent |
545 | * before setting a pointer to it. | 525 | * before setting a pointer to it. |
546 | */ | 526 | */ |
547 | rcu_assign_pointer(elem->multi, (*entry)->multi); | 527 | rcu_assign_pointer(elem->multi, entry->multi); |
548 | /* | 528 | /* |
549 | * Update the function or multi probe array pointer before setting the | 529 | * Update the function or multi probe array pointer before setting the |
550 | * ptype. | 530 | * ptype. |
551 | */ | 531 | */ |
552 | smp_wmb(); | 532 | smp_wmb(); |
553 | elem->ptype = (*entry)->ptype; | 533 | elem->ptype = entry->ptype; |
534 | |||
535 | if (elem->tp_name && (active ^ elem->state)) { | ||
536 | WARN_ON(!elem->tp_cb); | ||
537 | /* | ||
538 | * It is ok to directly call the probe registration because type | ||
539 | * checking has been done in the __trace_mark_tp() macro. | ||
540 | */ | ||
541 | |||
542 | if (active) { | ||
543 | /* | ||
544 | * try_module_get should always succeed because we hold | ||
545 | * lock_module() to get the tp_cb address. | ||
546 | */ | ||
547 | ret = try_module_get(__module_text_address( | ||
548 | (unsigned long)elem->tp_cb)); | ||
549 | BUG_ON(!ret); | ||
550 | ret = tracepoint_probe_register_noupdate( | ||
551 | elem->tp_name, | ||
552 | elem->tp_cb); | ||
553 | } else { | ||
554 | ret = tracepoint_probe_unregister_noupdate( | ||
555 | elem->tp_name, | ||
556 | elem->tp_cb); | ||
557 | /* | ||
558 | * tracepoint_probe_update_all() must be called | ||
559 | * before the module containing tp_cb is unloaded. | ||
560 | */ | ||
561 | module_put(__module_text_address( | ||
562 | (unsigned long)elem->tp_cb)); | ||
563 | } | ||
564 | } | ||
554 | elem->state = active; | 565 | elem->state = active; |
555 | 566 | ||
556 | return 0; | 567 | return ret; |
557 | } | 568 | } |
558 | 569 | ||
559 | /* | 570 | /* |
@@ -564,7 +575,24 @@ static int set_marker(struct marker_entry **entry, struct marker *elem, | |||
564 | */ | 575 | */ |
565 | static void disable_marker(struct marker *elem) | 576 | static void disable_marker(struct marker *elem) |
566 | { | 577 | { |
578 | int ret; | ||
579 | |||
567 | /* leave "call" as is. It is known statically. */ | 580 | /* leave "call" as is. It is known statically. */ |
581 | if (elem->tp_name && elem->state) { | ||
582 | WARN_ON(!elem->tp_cb); | ||
583 | /* | ||
584 | * It is ok to directly call the probe registration because type | ||
585 | * checking has been done in the __trace_mark_tp() macro. | ||
586 | */ | ||
587 | ret = tracepoint_probe_unregister_noupdate(elem->tp_name, | ||
588 | elem->tp_cb); | ||
589 | WARN_ON(ret); | ||
590 | /* | ||
591 | * tracepoint_probe_update_all() must be called | ||
592 | * before the module containing tp_cb is unloaded. | ||
593 | */ | ||
594 | module_put(__module_text_address((unsigned long)elem->tp_cb)); | ||
595 | } | ||
568 | elem->state = 0; | 596 | elem->state = 0; |
569 | elem->single.func = __mark_empty_function; | 597 | elem->single.func = __mark_empty_function; |
570 | /* Update the function before setting the ptype */ | 598 | /* Update the function before setting the ptype */ |
@@ -594,8 +622,7 @@ void marker_update_probe_range(struct marker *begin, | |||
594 | for (iter = begin; iter < end; iter++) { | 622 | for (iter = begin; iter < end; iter++) { |
595 | mark_entry = get_marker(iter->name); | 623 | mark_entry = get_marker(iter->name); |
596 | if (mark_entry) { | 624 | if (mark_entry) { |
597 | set_marker(&mark_entry, iter, | 625 | set_marker(mark_entry, iter, !!mark_entry->refcount); |
598 | !!mark_entry->refcount); | ||
599 | /* | 626 | /* |
600 | * ignore error, continue | 627 | * ignore error, continue |
601 | */ | 628 | */ |
@@ -629,6 +656,7 @@ static void marker_update_probes(void) | |||
629 | marker_update_probe_range(__start___markers, __stop___markers); | 656 | marker_update_probe_range(__start___markers, __stop___markers); |
630 | /* Markers in modules. */ | 657 | /* Markers in modules. */ |
631 | module_update_markers(); | 658 | module_update_markers(); |
659 | tracepoint_probe_update_all(); | ||
632 | } | 660 | } |
633 | 661 | ||
634 | /** | 662 | /** |
@@ -657,7 +685,7 @@ int marker_probe_register(const char *name, const char *format, | |||
657 | ret = PTR_ERR(entry); | 685 | ret = PTR_ERR(entry); |
658 | } else if (format) { | 686 | } else if (format) { |
659 | if (!entry->format) | 687 | if (!entry->format) |
660 | ret = marker_set_format(&entry, format); | 688 | ret = marker_set_format(entry, format); |
661 | else if (strcmp(entry->format, format)) | 689 | else if (strcmp(entry->format, format)) |
662 | ret = -EPERM; | 690 | ret = -EPERM; |
663 | } | 691 | } |
@@ -676,10 +704,11 @@ int marker_probe_register(const char *name, const char *format, | |||
676 | goto end; | 704 | goto end; |
677 | } | 705 | } |
678 | mutex_unlock(&markers_mutex); | 706 | mutex_unlock(&markers_mutex); |
679 | marker_update_probes(); /* may update entry */ | 707 | marker_update_probes(); |
680 | mutex_lock(&markers_mutex); | 708 | mutex_lock(&markers_mutex); |
681 | entry = get_marker(name); | 709 | entry = get_marker(name); |
682 | WARN_ON(!entry); | 710 | if (!entry) |
711 | goto end; | ||
683 | if (entry->rcu_pending) | 712 | if (entry->rcu_pending) |
684 | rcu_barrier_sched(); | 713 | rcu_barrier_sched(); |
685 | entry->oldptr = old; | 714 | entry->oldptr = old; |
@@ -720,7 +749,7 @@ int marker_probe_unregister(const char *name, | |||
720 | rcu_barrier_sched(); | 749 | rcu_barrier_sched(); |
721 | old = marker_entry_remove_probe(entry, probe, probe_private); | 750 | old = marker_entry_remove_probe(entry, probe, probe_private); |
722 | mutex_unlock(&markers_mutex); | 751 | mutex_unlock(&markers_mutex); |
723 | marker_update_probes(); /* may update entry */ | 752 | marker_update_probes(); |
724 | mutex_lock(&markers_mutex); | 753 | mutex_lock(&markers_mutex); |
725 | entry = get_marker(name); | 754 | entry = get_marker(name); |
726 | if (!entry) | 755 | if (!entry) |
@@ -801,10 +830,11 @@ int marker_probe_unregister_private_data(marker_probe_func *probe, | |||
801 | rcu_barrier_sched(); | 830 | rcu_barrier_sched(); |
802 | old = marker_entry_remove_probe(entry, NULL, probe_private); | 831 | old = marker_entry_remove_probe(entry, NULL, probe_private); |
803 | mutex_unlock(&markers_mutex); | 832 | mutex_unlock(&markers_mutex); |
804 | marker_update_probes(); /* may update entry */ | 833 | marker_update_probes(); |
805 | mutex_lock(&markers_mutex); | 834 | mutex_lock(&markers_mutex); |
806 | entry = get_marker_from_private_data(probe, probe_private); | 835 | entry = get_marker_from_private_data(probe, probe_private); |
807 | WARN_ON(!entry); | 836 | if (!entry) |
837 | goto end; | ||
808 | if (entry->rcu_pending) | 838 | if (entry->rcu_pending) |
809 | rcu_barrier_sched(); | 839 | rcu_barrier_sched(); |
810 | entry->oldptr = old; | 840 | entry->oldptr = old; |
@@ -848,8 +878,6 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe, | |||
848 | if (!e->ptype) { | 878 | if (!e->ptype) { |
849 | if (num == 0 && e->single.func == probe) | 879 | if (num == 0 && e->single.func == probe) |
850 | return e->single.probe_private; | 880 | return e->single.probe_private; |
851 | else | ||
852 | break; | ||
853 | } else { | 881 | } else { |
854 | struct marker_probe_closure *closure; | 882 | struct marker_probe_closure *closure; |
855 | int match = 0; | 883 | int match = 0; |
@@ -861,8 +889,42 @@ void *marker_get_private_data(const char *name, marker_probe_func *probe, | |||
861 | return closure[i].probe_private; | 889 | return closure[i].probe_private; |
862 | } | 890 | } |
863 | } | 891 | } |
892 | break; | ||
864 | } | 893 | } |
865 | } | 894 | } |
866 | return ERR_PTR(-ENOENT); | 895 | return ERR_PTR(-ENOENT); |
867 | } | 896 | } |
868 | EXPORT_SYMBOL_GPL(marker_get_private_data); | 897 | EXPORT_SYMBOL_GPL(marker_get_private_data); |
898 | |||
899 | #ifdef CONFIG_MODULES | ||
900 | |||
901 | int marker_module_notify(struct notifier_block *self, | ||
902 | unsigned long val, void *data) | ||
903 | { | ||
904 | struct module *mod = data; | ||
905 | |||
906 | switch (val) { | ||
907 | case MODULE_STATE_COMING: | ||
908 | marker_update_probe_range(mod->markers, | ||
909 | mod->markers + mod->num_markers); | ||
910 | break; | ||
911 | case MODULE_STATE_GOING: | ||
912 | marker_update_probe_range(mod->markers, | ||
913 | mod->markers + mod->num_markers); | ||
914 | break; | ||
915 | } | ||
916 | return 0; | ||
917 | } | ||
918 | |||
919 | struct notifier_block marker_module_nb = { | ||
920 | .notifier_call = marker_module_notify, | ||
921 | .priority = 0, | ||
922 | }; | ||
923 | |||
924 | static int init_markers(void) | ||
925 | { | ||
926 | return register_module_notifier(&marker_module_nb); | ||
927 | } | ||
928 | __initcall(init_markers); | ||
929 | |||
930 | #endif /* CONFIG_MODULES */ | ||
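Note: with this notifier in place, marker probe ranges are refreshed when a module comes or goes, instead of from inside load_module() (see the kernel/module.c hunk below, which drops that call). The COMING and GOING cases are deliberately identical: both just re-walk the module's marker section. Any subsystem can watch module state the same way; a minimal, hypothetical consumer:

    static int my_module_notify(struct notifier_block *self,
                                unsigned long val, void *data)
    {
            struct module *mod = data;

            if (val == MODULE_STATE_COMING || val == MODULE_STATE_GOING)
                    printk(KERN_DEBUG "module %s changing state\n", mod->name);
            return 0;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_module_notify,
    };

    /* register_module_notifier(&my_nb) from an __initcall, as above */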
diff --git a/kernel/module.c b/kernel/module.c index 1f4cc00e0c20..dd2a54155b54 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -2184,24 +2184,15 @@ static noinline struct module *load_module(void __user *umod, | |||
2184 | struct mod_debug *debug; | 2184 | struct mod_debug *debug; |
2185 | unsigned int num_debug; | 2185 | unsigned int num_debug; |
2186 | 2186 | ||
2187 | #ifdef CONFIG_MARKERS | ||
2188 | marker_update_probe_range(mod->markers, | ||
2189 | mod->markers + mod->num_markers); | ||
2190 | #endif | ||
2191 | debug = section_objs(hdr, sechdrs, secstrings, "__verbose", | 2187 | debug = section_objs(hdr, sechdrs, secstrings, "__verbose", |
2192 | sizeof(*debug), &num_debug); | 2188 | sizeof(*debug), &num_debug); |
2193 | dynamic_printk_setup(debug, num_debug); | 2189 | dynamic_printk_setup(debug, num_debug); |
2194 | |||
2195 | #ifdef CONFIG_TRACEPOINTS | ||
2196 | tracepoint_update_probe_range(mod->tracepoints, | ||
2197 | mod->tracepoints + mod->num_tracepoints); | ||
2198 | #endif | ||
2199 | } | 2190 | } |
2200 | 2191 | ||
2201 | /* sechdrs[0].sh_size is always zero */ | 2192 | /* sechdrs[0].sh_size is always zero */ |
2202 | mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc", | 2193 | mseg = section_objs(hdr, sechdrs, secstrings, "__mcount_loc", |
2203 | sizeof(*mseg), &num_mcount); | 2194 | sizeof(*mseg), &num_mcount); |
2204 | ftrace_init_module(mseg, mseg + num_mcount); | 2195 | ftrace_init_module(mod, mseg, mseg + num_mcount); |
2205 | 2196 | ||
2206 | err = module_finalize(hdr, sechdrs, mod); | 2197 | err = module_finalize(hdr, sechdrs, mod); |
2207 | if (err < 0) | 2198 | if (err < 0) |
@@ -2713,7 +2704,7 @@ int is_module_address(unsigned long addr) | |||
2713 | 2704 | ||
2714 | 2705 | ||
2715 | /* Is this a valid kernel address? */ | 2706 | /* Is this a valid kernel address? */ |
2716 | struct module *__module_text_address(unsigned long addr) | 2707 | __notrace_funcgraph struct module *__module_text_address(unsigned long addr) |
2717 | { | 2708 | { |
2718 | struct module *mod; | 2709 | struct module *mod; |
2719 | 2710 | ||
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 1d3ef29a2583..63598dca2d0c 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c | |||
@@ -80,12 +80,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, | |||
80 | goto out_pid; | 80 | goto out_pid; |
81 | } | 81 | } |
82 | 82 | ||
83 | new_nsp->user_ns = copy_user_ns(flags, tsk->nsproxy->user_ns); | ||
84 | if (IS_ERR(new_nsp->user_ns)) { | ||
85 | err = PTR_ERR(new_nsp->user_ns); | ||
86 | goto out_user; | ||
87 | } | ||
88 | |||
89 | new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns); | 83 | new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns); |
90 | if (IS_ERR(new_nsp->net_ns)) { | 84 | if (IS_ERR(new_nsp->net_ns)) { |
91 | err = PTR_ERR(new_nsp->net_ns); | 85 | err = PTR_ERR(new_nsp->net_ns); |
@@ -95,9 +89,6 @@ static struct nsproxy *create_new_namespaces(unsigned long flags, | |||
95 | return new_nsp; | 89 | return new_nsp; |
96 | 90 | ||
97 | out_net: | 91 | out_net: |
98 | if (new_nsp->user_ns) | ||
99 | put_user_ns(new_nsp->user_ns); | ||
100 | out_user: | ||
101 | if (new_nsp->pid_ns) | 92 | if (new_nsp->pid_ns) |
102 | put_pid_ns(new_nsp->pid_ns); | 93 | put_pid_ns(new_nsp->pid_ns); |
103 | out_pid: | 94 | out_pid: |
@@ -130,7 +121,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) | |||
130 | get_nsproxy(old_ns); | 121 | get_nsproxy(old_ns); |
131 | 122 | ||
132 | if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | | 123 | if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | |
133 | CLONE_NEWUSER | CLONE_NEWPID | CLONE_NEWNET))) | 124 | CLONE_NEWPID | CLONE_NEWNET))) |
134 | return 0; | 125 | return 0; |
135 | 126 | ||
136 | if (!capable(CAP_SYS_ADMIN)) { | 127 | if (!capable(CAP_SYS_ADMIN)) { |
@@ -173,8 +164,6 @@ void free_nsproxy(struct nsproxy *ns) | |||
173 | put_ipc_ns(ns->ipc_ns); | 164 | put_ipc_ns(ns->ipc_ns); |
174 | if (ns->pid_ns) | 165 | if (ns->pid_ns) |
175 | put_pid_ns(ns->pid_ns); | 166 | put_pid_ns(ns->pid_ns); |
176 | if (ns->user_ns) | ||
177 | put_user_ns(ns->user_ns); | ||
178 | put_net(ns->net_ns); | 167 | put_net(ns->net_ns); |
179 | kmem_cache_free(nsproxy_cachep, ns); | 168 | kmem_cache_free(nsproxy_cachep, ns); |
180 | } | 169 | } |
@@ -189,7 +178,7 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags, | |||
189 | int err = 0; | 178 | int err = 0; |
190 | 179 | ||
191 | if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | | 180 | if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC | |
192 | CLONE_NEWUSER | CLONE_NEWNET))) | 181 | CLONE_NEWNET))) |
193 | return 0; | 182 | return 0; |
194 | 183 | ||
195 | if (!capable(CAP_SYS_ADMIN)) | 184 | if (!capable(CAP_SYS_ADMIN)) |
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 895337b16a24..4e5288a831de 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -311,7 +311,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock, | |||
311 | struct task_cputime cputime; | 311 | struct task_cputime cputime; |
312 | 312 | ||
313 | thread_group_cputime(p, &cputime); | 313 | thread_group_cputime(p, &cputime); |
314 | switch (which_clock) { | 314 | switch (CPUCLOCK_WHICH(which_clock)) { |
315 | default: | 315 | default: |
316 | return -EINVAL; | 316 | return -EINVAL; |
317 | case CPUCLOCK_PROF: | 317 | case CPUCLOCK_PROF: |
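Note: the one-line fix is about the clockid encoding. A CPU-clock clockid_t packs a PID and a per-thread flag into its upper bits and keeps the clock type in the low bits, so switching on the raw value only matched the zero-PID encodings; CPUCLOCK_WHICH() masks the type back out. The relevant definitions, paraphrased from include/linux/posix-timers.h:

    #define CPUCLOCK_CLOCK_MASK     3
    #define CPUCLOCK_WHICH(clock)   ((clock) & (clockid_t) CPUCLOCK_CLOCK_MASK)
    #define CPUCLOCK_PROF           0
    #define CPUCLOCK_VIRT           1
    #define CPUCLOCK_SCHED          2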
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 5e79c662294b..a140e44eebba 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -197,6 +197,11 @@ static int common_timer_create(struct k_itimer *new_timer) | |||
197 | return 0; | 197 | return 0; |
198 | } | 198 | } |
199 | 199 | ||
200 | static int no_timer_create(struct k_itimer *new_timer) | ||
201 | { | ||
202 | return -EOPNOTSUPP; | ||
203 | } | ||
204 | |||
200 | /* | 205 | /* |
201 | * Return nonzero if we know a priori this clockid_t value is bogus. | 206 | * Return nonzero if we know a priori this clockid_t value is bogus. |
202 | */ | 207 | */ |
@@ -248,6 +253,7 @@ static __init int init_posix_timers(void) | |||
248 | .clock_getres = hrtimer_get_res, | 253 | .clock_getres = hrtimer_get_res, |
249 | .clock_get = posix_get_monotonic_raw, | 254 | .clock_get = posix_get_monotonic_raw, |
250 | .clock_set = do_posix_clock_nosettime, | 255 | .clock_set = do_posix_clock_nosettime, |
256 | .timer_create = no_timer_create, | ||
251 | }; | 257 | }; |
252 | 258 | ||
253 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); | 259 | register_posix_clock(CLOCK_REALTIME, &clock_realtime); |
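Note: CLOCK_MONOTONIC_RAW intentionally gets a .timer_create stub that fails. Without it, the generic dispatch would fall back to common_timer_create() and try to arm an hrtimer on a clock base that does not exist for the raw clock. What a caller now sees, sketched as a userspace snippet (link with -lrt):

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            timer_t tid;
            struct sigevent sev = { .sigev_notify = SIGEV_NONE };

            if (timer_create(CLOCK_MONOTONIC_RAW, &sev, &tid) == -1)
                    perror("timer_create");    /* expected: EOPNOTSUPP */
            return 0;
    }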
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index c9d74083746f..f77d3819ef57 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/console.h> | 22 | #include <linux/console.h> |
23 | #include <linux/cpu.h> | 23 | #include <linux/cpu.h> |
24 | #include <linux/freezer.h> | 24 | #include <linux/freezer.h> |
25 | #include <linux/ftrace.h> | ||
26 | 25 | ||
27 | #include "power.h" | 26 | #include "power.h" |
28 | 27 | ||
@@ -257,7 +256,7 @@ static int create_image(int platform_mode) | |||
257 | 256 | ||
258 | int hibernation_snapshot(int platform_mode) | 257 | int hibernation_snapshot(int platform_mode) |
259 | { | 258 | { |
260 | int error, ftrace_save; | 259 | int error; |
261 | 260 | ||
262 | /* Free memory before shutting down devices. */ | 261 | /* Free memory before shutting down devices. */ |
263 | error = swsusp_shrink_memory(); | 262 | error = swsusp_shrink_memory(); |
@@ -269,7 +268,6 @@ int hibernation_snapshot(int platform_mode) | |||
269 | goto Close; | 268 | goto Close; |
270 | 269 | ||
271 | suspend_console(); | 270 | suspend_console(); |
272 | ftrace_save = __ftrace_enabled_save(); | ||
273 | error = device_suspend(PMSG_FREEZE); | 271 | error = device_suspend(PMSG_FREEZE); |
274 | if (error) | 272 | if (error) |
275 | goto Recover_platform; | 273 | goto Recover_platform; |
@@ -299,7 +297,6 @@ int hibernation_snapshot(int platform_mode) | |||
299 | Resume_devices: | 297 | Resume_devices: |
300 | device_resume(in_suspend ? | 298 | device_resume(in_suspend ? |
301 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); | 299 | (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); |
302 | __ftrace_enabled_restore(ftrace_save); | ||
303 | resume_console(); | 300 | resume_console(); |
304 | Close: | 301 | Close: |
305 | platform_end(platform_mode); | 302 | platform_end(platform_mode); |
@@ -370,11 +367,10 @@ static int resume_target_kernel(void) | |||
370 | 367 | ||
371 | int hibernation_restore(int platform_mode) | 368 | int hibernation_restore(int platform_mode) |
372 | { | 369 | { |
373 | int error, ftrace_save; | 370 | int error; |
374 | 371 | ||
375 | pm_prepare_console(); | 372 | pm_prepare_console(); |
376 | suspend_console(); | 373 | suspend_console(); |
377 | ftrace_save = __ftrace_enabled_save(); | ||
378 | error = device_suspend(PMSG_QUIESCE); | 374 | error = device_suspend(PMSG_QUIESCE); |
379 | if (error) | 375 | if (error) |
380 | goto Finish; | 376 | goto Finish; |
@@ -389,7 +385,6 @@ int hibernation_restore(int platform_mode) | |||
389 | platform_restore_cleanup(platform_mode); | 385 | platform_restore_cleanup(platform_mode); |
390 | device_resume(PMSG_RECOVER); | 386 | device_resume(PMSG_RECOVER); |
391 | Finish: | 387 | Finish: |
392 | __ftrace_enabled_restore(ftrace_save); | ||
393 | resume_console(); | 388 | resume_console(); |
394 | pm_restore_console(); | 389 | pm_restore_console(); |
395 | return error; | 390 | return error; |
@@ -402,7 +397,7 @@ int hibernation_restore(int platform_mode) | |||
402 | 397 | ||
403 | int hibernation_platform_enter(void) | 398 | int hibernation_platform_enter(void) |
404 | { | 399 | { |
405 | int error, ftrace_save; | 400 | int error; |
406 | 401 | ||
407 | if (!hibernation_ops) | 402 | if (!hibernation_ops) |
408 | return -ENOSYS; | 403 | return -ENOSYS; |
@@ -417,7 +412,6 @@ int hibernation_platform_enter(void) | |||
417 | goto Close; | 412 | goto Close; |
418 | 413 | ||
419 | suspend_console(); | 414 | suspend_console(); |
420 | ftrace_save = __ftrace_enabled_save(); | ||
421 | error = device_suspend(PMSG_HIBERNATE); | 415 | error = device_suspend(PMSG_HIBERNATE); |
422 | if (error) { | 416 | if (error) { |
423 | if (hibernation_ops->recover) | 417 | if (hibernation_ops->recover) |
@@ -452,7 +446,6 @@ int hibernation_platform_enter(void) | |||
452 | hibernation_ops->finish(); | 446 | hibernation_ops->finish(); |
453 | Resume_devices: | 447 | Resume_devices: |
454 | device_resume(PMSG_RESTORE); | 448 | device_resume(PMSG_RESTORE); |
455 | __ftrace_enabled_restore(ftrace_save); | ||
456 | resume_console(); | 449 | resume_console(); |
457 | Close: | 450 | Close: |
458 | hibernation_ops->end(); | 451 | hibernation_ops->end(); |
diff --git a/kernel/power/main.c b/kernel/power/main.c index b8f7ce9473e8..613f16941b85 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/freezer.h> | 22 | #include <linux/freezer.h> |
23 | #include <linux/vmstat.h> | 23 | #include <linux/vmstat.h> |
24 | #include <linux/syscalls.h> | 24 | #include <linux/syscalls.h> |
25 | #include <linux/ftrace.h> | ||
26 | 25 | ||
27 | #include "power.h" | 26 | #include "power.h" |
28 | 27 | ||
@@ -317,7 +316,7 @@ static int suspend_enter(suspend_state_t state) | |||
317 | */ | 316 | */ |
318 | int suspend_devices_and_enter(suspend_state_t state) | 317 | int suspend_devices_and_enter(suspend_state_t state) |
319 | { | 318 | { |
320 | int error, ftrace_save; | 319 | int error; |
321 | 320 | ||
322 | if (!suspend_ops) | 321 | if (!suspend_ops) |
323 | return -ENOSYS; | 322 | return -ENOSYS; |
@@ -328,7 +327,6 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
328 | goto Close; | 327 | goto Close; |
329 | } | 328 | } |
330 | suspend_console(); | 329 | suspend_console(); |
331 | ftrace_save = __ftrace_enabled_save(); | ||
332 | suspend_test_start(); | 330 | suspend_test_start(); |
333 | error = device_suspend(PMSG_SUSPEND); | 331 | error = device_suspend(PMSG_SUSPEND); |
334 | if (error) { | 332 | if (error) { |
@@ -360,7 +358,6 @@ int suspend_devices_and_enter(suspend_state_t state) | |||
360 | suspend_test_start(); | 358 | suspend_test_start(); |
361 | device_resume(PMSG_RESUME); | 359 | device_resume(PMSG_RESUME); |
362 | suspend_test_finish("resume devices"); | 360 | suspend_test_finish("resume devices"); |
363 | __ftrace_enabled_restore(ftrace_save); | ||
364 | resume_console(); | 361 | resume_console(); |
365 | Close: | 362 | Close: |
366 | if (suspend_ops->end) | 363 | if (suspend_ops->end) |
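Note: together with the kernel/power/disk.c hunks above, this drops the hand-rolled bracketing of every suspend/hibernate path with ftrace state save/restore, presumably because the tracer no longer needs to be switched off by its callers across these transitions. The pattern being removed, for reference:

    int ftrace_save;

    ftrace_save = __ftrace_enabled_save();  /* tracing off across suspend */
    error = device_suspend(PMSG_SUSPEND);
    /* ... enter the low-power state, resume devices ... */
    __ftrace_enabled_restore(ftrace_save);  /* tracing back on at resume */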
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index b7713b53d07a..6da14358537c 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -633,7 +633,7 @@ void swsusp_close(fmode_t mode) | |||
633 | return; | 633 | return; |
634 | } | 634 | } |
635 | 635 | ||
636 | blkdev_put(resume_bdev, mode); /* move up */ | 636 | blkdev_put(resume_bdev, mode); |
637 | } | 637 | } |
638 | 638 | ||
639 | static int swsusp_header_init(void) | 639 | static int swsusp_header_init(void) |
diff --git a/kernel/profile.c b/kernel/profile.c index dc41827fbfee..60adefb59b5e 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -544,7 +544,7 @@ static const struct file_operations proc_profile_operations = { | |||
544 | }; | 544 | }; |
545 | 545 | ||
546 | #ifdef CONFIG_SMP | 546 | #ifdef CONFIG_SMP |
547 | static inline void profile_nop(void *unused) | 547 | static void profile_nop(void *unused) |
548 | { | 548 | { |
549 | } | 549 | } |
550 | 550 | ||
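Note: dropping `inline` here is more than style. profile_nop exists only so its address can be handed to the SMP cross-call machinery, and a function used solely through a pointer cannot be inlined anyway. Illustrative use (the call site is assumed, not shown in this hunk):

    static void profile_nop(void *unused)
    {
    }

    /* used only by address, e.g. to run a no-op on every CPU */
    on_each_cpu(profile_nop, NULL, 1);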
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 4c8bcd7dd8e0..29dc700e198c 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -25,6 +25,17 @@ | |||
25 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | 27 | ||
28 | |||
29 | /* | ||
30 | * Initialize a new task whose father had been ptraced. | ||
31 | * | ||
32 | * Called from copy_process(). | ||
33 | */ | ||
34 | void ptrace_fork(struct task_struct *child, unsigned long clone_flags) | ||
35 | { | ||
36 | arch_ptrace_fork(child, clone_flags); | ||
37 | } | ||
38 | |||
28 | /* | 39 | /* |
29 | * ptrace a task: make the debugger its new parent and | 40 | * ptrace a task: make the debugger its new parent and |
30 | * move it to the ptrace list. | 41 | * move it to the ptrace list. |
@@ -72,6 +83,7 @@ void __ptrace_unlink(struct task_struct *child) | |||
72 | child->parent = child->real_parent; | 83 | child->parent = child->real_parent; |
73 | list_del_init(&child->ptrace_entry); | 84 | list_del_init(&child->ptrace_entry); |
74 | 85 | ||
86 | arch_ptrace_untrace(child); | ||
75 | if (task_is_traced(child)) | 87 | if (task_is_traced(child)) |
76 | ptrace_untrace(child); | 88 | ptrace_untrace(child); |
77 | } | 89 | } |
@@ -115,6 +127,8 @@ int ptrace_check_attach(struct task_struct *child, int kill) | |||
115 | 127 | ||
116 | int __ptrace_may_access(struct task_struct *task, unsigned int mode) | 128 | int __ptrace_may_access(struct task_struct *task, unsigned int mode) |
117 | { | 129 | { |
130 | const struct cred *cred = current_cred(), *tcred; | ||
131 | |||
118 | /* May we inspect the given task? | 132 | /* May we inspect the given task? |
119 | * This check is used both for attaching with ptrace | 133 | * This check is used both for attaching with ptrace |
120 | * and for allowing access to sensitive information in /proc. | 134 | * and for allowing access to sensitive information in /proc. |
@@ -127,13 +141,19 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
127 | /* Don't let security modules deny introspection */ | 141 | /* Don't let security modules deny introspection */ |
128 | if (task == current) | 142 | if (task == current) |
129 | return 0; | 143 | return 0; |
130 | if (((current->uid != task->euid) || | 144 | rcu_read_lock(); |
131 | (current->uid != task->suid) || | 145 | tcred = __task_cred(task); |
132 | (current->uid != task->uid) || | 146 | if ((cred->uid != tcred->euid || |
133 | (current->gid != task->egid) || | 147 | cred->uid != tcred->suid || |
134 | (current->gid != task->sgid) || | 148 | cred->uid != tcred->uid || |
135 | (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE)) | 149 | cred->gid != tcred->egid || |
150 | cred->gid != tcred->sgid || | ||
151 | cred->gid != tcred->gid) && | ||
152 | !capable(CAP_SYS_PTRACE)) { | ||
153 | rcu_read_unlock(); | ||
136 | return -EPERM; | 154 | return -EPERM; |
155 | } | ||
156 | rcu_read_unlock(); | ||
137 | smp_rmb(); | 157 | smp_rmb(); |
138 | if (task->mm) | 158 | if (task->mm) |
139 | dumpable = get_dumpable(task->mm); | 159 | dumpable = get_dumpable(task->mm); |
@@ -163,6 +183,14 @@ int ptrace_attach(struct task_struct *task) | |||
163 | if (same_thread_group(task, current)) | 183 | if (same_thread_group(task, current)) |
164 | goto out; | 184 | goto out; |
165 | 185 | ||
186 | /* Protect exec's credential calculations against our interference; | ||
187 | * SUID, SGID and LSM creds get determined differently under ptrace. | ||
188 | */ | ||
189 | retval = mutex_lock_interruptible(¤t->cred_exec_mutex); | ||
190 | if (retval < 0) | ||
191 | goto out; | ||
192 | |||
193 | retval = -EPERM; | ||
166 | repeat: | 194 | repeat: |
167 | /* | 195 | /* |
168 | * Nasty, nasty. | 196 | * Nasty, nasty. |
@@ -202,6 +230,7 @@ repeat: | |||
202 | bad: | 230 | bad: |
203 | write_unlock_irqrestore(&tasklist_lock, flags); | 231 | write_unlock_irqrestore(&tasklist_lock, flags); |
204 | task_unlock(task); | 232 | task_unlock(task); |
233 | mutex_unlock(¤t->cred_exec_mutex); | ||
205 | out: | 234 | out: |
206 | return retval; | 235 | return retval; |
207 | } | 236 | } |
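Note: the __ptrace_may_access() rewrite is part of the credentials overhaul visible throughout this merge: uid/gid fields move off task_struct into a struct cred reached via __task_cred(), which is RCU-managed and may only be dereferenced inside an RCU read-side section. The access pattern, reduced to a skeleton (the single-uid check is illustrative):

    const struct cred *cred = current_cred(), *tcred;
    bool ok;

    rcu_read_lock();
    tcred = __task_cred(task);      /* valid only inside the read section */
    ok = (cred->uid == tcred->uid) || capable(CAP_SYS_PTRACE);
    rcu_read_unlock();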
diff --git a/kernel/relay.c b/kernel/relay.c index 32b0befdcb6a..09ac2008f77b 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -1317,12 +1317,9 @@ static ssize_t relay_file_splice_read(struct file *in, | |||
1317 | if (ret < 0) | 1317 | if (ret < 0) |
1318 | break; | 1318 | break; |
1319 | else if (!ret) { | 1319 | else if (!ret) { |
1320 | if (spliced) | 1320 | if (flags & SPLICE_F_NONBLOCK) |
1321 | break; | ||
1322 | if (flags & SPLICE_F_NONBLOCK) { | ||
1323 | ret = -EAGAIN; | 1321 | ret = -EAGAIN; |
1324 | break; | 1322 | break; |
1325 | } | ||
1326 | } | 1323 | } |
1327 | 1324 | ||
1328 | *ppos += ret; | 1325 | *ppos += ret; |
diff --git a/kernel/sched.c b/kernel/sched.c index b7480fb5c3dc..748ff924a290 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -118,6 +118,12 @@ | |||
118 | */ | 118 | */ |
119 | #define RUNTIME_INF ((u64)~0ULL) | 119 | #define RUNTIME_INF ((u64)~0ULL) |
120 | 120 | ||
121 | DEFINE_TRACE(sched_wait_task); | ||
122 | DEFINE_TRACE(sched_wakeup); | ||
123 | DEFINE_TRACE(sched_wakeup_new); | ||
124 | DEFINE_TRACE(sched_switch); | ||
125 | DEFINE_TRACE(sched_migrate_task); | ||
126 | |||
121 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
122 | /* | 128 | /* |
123 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) | 129 | * Divide a load by a sched group cpu_power : (load / sg->__cpu_power) |
@@ -261,6 +267,10 @@ struct task_group { | |||
261 | struct cgroup_subsys_state css; | 267 | struct cgroup_subsys_state css; |
262 | #endif | 268 | #endif |
263 | 269 | ||
270 | #ifdef CONFIG_USER_SCHED | ||
271 | uid_t uid; | ||
272 | #endif | ||
273 | |||
264 | #ifdef CONFIG_FAIR_GROUP_SCHED | 274 | #ifdef CONFIG_FAIR_GROUP_SCHED |
265 | /* schedulable entities of this group on each cpu */ | 275 | /* schedulable entities of this group on each cpu */ |
266 | struct sched_entity **se; | 276 | struct sched_entity **se; |
@@ -286,6 +296,12 @@ struct task_group { | |||
286 | 296 | ||
287 | #ifdef CONFIG_USER_SCHED | 297 | #ifdef CONFIG_USER_SCHED |
288 | 298 | ||
299 | /* Helper function to pass uid information to create_sched_user() */ | ||
300 | void set_tg_uid(struct user_struct *user) | ||
301 | { | ||
302 | user->tg->uid = user->uid; | ||
303 | } | ||
304 | |||
289 | /* | 305 | /* |
290 | * Root task group. | 306 | * Root task group. |
291 | * Every UID task group (including init_task_group aka UID-0) will | 307 | * Every UID task group (including init_task_group aka UID-0) will |
@@ -345,7 +361,9 @@ static inline struct task_group *task_group(struct task_struct *p) | |||
345 | struct task_group *tg; | 361 | struct task_group *tg; |
346 | 362 | ||
347 | #ifdef CONFIG_USER_SCHED | 363 | #ifdef CONFIG_USER_SCHED |
348 | tg = p->user->tg; | 364 | rcu_read_lock(); |
365 | tg = __task_cred(p)->user->tg; | ||
366 | rcu_read_unlock(); | ||
349 | #elif defined(CONFIG_CGROUP_SCHED) | 367 | #elif defined(CONFIG_CGROUP_SCHED) |
350 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), | 368 | tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id), |
351 | struct task_group, css); | 369 | struct task_group, css); |
@@ -586,6 +604,8 @@ struct rq { | |||
586 | #ifdef CONFIG_SCHEDSTATS | 604 | #ifdef CONFIG_SCHEDSTATS |
587 | /* latency stats */ | 605 | /* latency stats */ |
588 | struct sched_info rq_sched_info; | 606 | struct sched_info rq_sched_info; |
607 | unsigned long long rq_cpu_time; | ||
608 | /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ | ||
589 | 609 | ||
590 | /* sys_sched_yield() stats */ | 610 | /* sys_sched_yield() stats */ |
591 | unsigned int yld_exp_empty; | 611 | unsigned int yld_exp_empty; |
@@ -703,45 +723,18 @@ static __read_mostly char *sched_feat_names[] = { | |||
703 | 723 | ||
704 | #undef SCHED_FEAT | 724 | #undef SCHED_FEAT |
705 | 725 | ||
706 | static int sched_feat_open(struct inode *inode, struct file *filp) | 726 | static int sched_feat_show(struct seq_file *m, void *v) |
707 | { | ||
708 | filp->private_data = inode->i_private; | ||
709 | return 0; | ||
710 | } | ||
711 | |||
712 | static ssize_t | ||
713 | sched_feat_read(struct file *filp, char __user *ubuf, | ||
714 | size_t cnt, loff_t *ppos) | ||
715 | { | 727 | { |
716 | char *buf; | ||
717 | int r = 0; | ||
718 | int len = 0; | ||
719 | int i; | 728 | int i; |
720 | 729 | ||
721 | for (i = 0; sched_feat_names[i]; i++) { | 730 | for (i = 0; sched_feat_names[i]; i++) { |
722 | len += strlen(sched_feat_names[i]); | 731 | if (!(sysctl_sched_features & (1UL << i))) |
723 | len += 4; | 732 | seq_puts(m, "NO_"); |
724 | } | 733 | seq_printf(m, "%s ", sched_feat_names[i]); |
725 | |||
726 | buf = kmalloc(len + 2, GFP_KERNEL); | ||
727 | if (!buf) | ||
728 | return -ENOMEM; | ||
729 | |||
730 | for (i = 0; sched_feat_names[i]; i++) { | ||
731 | if (sysctl_sched_features & (1UL << i)) | ||
732 | r += sprintf(buf + r, "%s ", sched_feat_names[i]); | ||
733 | else | ||
734 | r += sprintf(buf + r, "NO_%s ", sched_feat_names[i]); | ||
735 | } | 734 | } |
735 | seq_puts(m, "\n"); | ||
736 | 736 | ||
737 | r += sprintf(buf + r, "\n"); | 737 | return 0; |
738 | WARN_ON(r >= len + 2); | ||
739 | |||
740 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
741 | |||
742 | kfree(buf); | ||
743 | |||
744 | return r; | ||
745 | } | 738 | } |
746 | 739 | ||
747 | static ssize_t | 740 | static ssize_t |
@@ -786,10 +779,17 @@ sched_feat_write(struct file *filp, const char __user *ubuf, | |||
786 | return cnt; | 779 | return cnt; |
787 | } | 780 | } |
788 | 781 | ||
782 | static int sched_feat_open(struct inode *inode, struct file *filp) | ||
783 | { | ||
784 | return single_open(filp, sched_feat_show, NULL); | ||
785 | } | ||
786 | |||
789 | static struct file_operations sched_feat_fops = { | 787 | static struct file_operations sched_feat_fops = { |
790 | .open = sched_feat_open, | 788 | .open = sched_feat_open, |
791 | .read = sched_feat_read, | 789 | .write = sched_feat_write, |
792 | .write = sched_feat_write, | 790 | .read = seq_read, |
791 | .llseek = seq_lseek, | ||
792 | .release = single_release, | ||
793 | }; | 793 | }; |
794 | 794 | ||
795 | static __init int sched_init_debug(void) | 795 | static __init int sched_init_debug(void) |
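Note: this hunk converts an open-coded kmalloc/sprintf read handler, which had to precompute its buffer size, into the canonical single_open() seq_file pattern: the show callback only prints, while seq_read/seq_lseek/single_release handle buffering, offsets and teardown. The generic shape of the pattern, with hypothetical names:

    static int foo_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "state=%d\n", 42);   /* print, don't buffer */
            return 0;
    }

    static int foo_open(struct inode *inode, struct file *filp)
    {
            return single_open(filp, foo_show, NULL);
    }

    static const struct file_operations foo_fops = {
            .open           = foo_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = single_release,
    };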
@@ -1474,27 +1474,13 @@ static void | |||
1474 | update_group_shares_cpu(struct task_group *tg, int cpu, | 1474 | update_group_shares_cpu(struct task_group *tg, int cpu, |
1475 | unsigned long sd_shares, unsigned long sd_rq_weight) | 1475 | unsigned long sd_shares, unsigned long sd_rq_weight) |
1476 | { | 1476 | { |
1477 | int boost = 0; | ||
1478 | unsigned long shares; | 1477 | unsigned long shares; |
1479 | unsigned long rq_weight; | 1478 | unsigned long rq_weight; |
1480 | 1479 | ||
1481 | if (!tg->se[cpu]) | 1480 | if (!tg->se[cpu]) |
1482 | return; | 1481 | return; |
1483 | 1482 | ||
1484 | rq_weight = tg->cfs_rq[cpu]->load.weight; | 1483 | rq_weight = tg->cfs_rq[cpu]->rq_weight; |
1485 | |||
1486 | /* | ||
1487 | * If there are currently no tasks on the cpu pretend there is one of | ||
1488 | * average load so that when a new task gets to run here it will not | ||
1489 | * get delayed by group starvation. | ||
1490 | */ | ||
1491 | if (!rq_weight) { | ||
1492 | boost = 1; | ||
1493 | rq_weight = NICE_0_LOAD; | ||
1494 | } | ||
1495 | |||
1496 | if (unlikely(rq_weight > sd_rq_weight)) | ||
1497 | rq_weight = sd_rq_weight; | ||
1498 | 1484 | ||
1499 | /* | 1485 | /* |
1500 | * \Sum shares * rq_weight | 1486 | * \Sum shares * rq_weight |
@@ -1502,7 +1488,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1502 | * \Sum rq_weight | 1488 | * \Sum rq_weight |
1503 | * | 1489 | * |
1504 | */ | 1490 | */ |
1505 | shares = (sd_shares * rq_weight) / (sd_rq_weight + 1); | 1491 | shares = (sd_shares * rq_weight) / sd_rq_weight; |
1506 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); | 1492 | shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES); |
1507 | 1493 | ||
1508 | if (abs(shares - tg->se[cpu]->load.weight) > | 1494 | if (abs(shares - tg->se[cpu]->load.weight) > |
@@ -1511,11 +1497,7 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1511 | unsigned long flags; | 1497 | unsigned long flags; |
1512 | 1498 | ||
1513 | spin_lock_irqsave(&rq->lock, flags); | 1499 | spin_lock_irqsave(&rq->lock, flags); |
1514 | /* | 1500 | tg->cfs_rq[cpu]->shares = shares; |
1515 | * record the actual number of shares, not the boosted amount. | ||
1516 | */ | ||
1517 | tg->cfs_rq[cpu]->shares = boost ? 0 : shares; | ||
1518 | tg->cfs_rq[cpu]->rq_weight = rq_weight; | ||
1519 | 1501 | ||
1520 | __set_se_shares(tg->se[cpu], shares); | 1502 | __set_se_shares(tg->se[cpu], shares); |
1521 | spin_unlock_irqrestore(&rq->lock, flags); | 1503 | spin_unlock_irqrestore(&rq->lock, flags); |
@@ -1529,13 +1511,23 @@ update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1529 | */ | 1511 | */ |
1530 | static int tg_shares_up(struct task_group *tg, void *data) | 1512 | static int tg_shares_up(struct task_group *tg, void *data) |
1531 | { | 1513 | { |
1532 | unsigned long rq_weight = 0; | 1514 | unsigned long weight, rq_weight = 0; |
1533 | unsigned long shares = 0; | 1515 | unsigned long shares = 0; |
1534 | struct sched_domain *sd = data; | 1516 | struct sched_domain *sd = data; |
1535 | int i; | 1517 | int i; |
1536 | 1518 | ||
1537 | for_each_cpu_mask(i, sd->span) { | 1519 | for_each_cpu_mask(i, sd->span) { |
1538 | rq_weight += tg->cfs_rq[i]->load.weight; | 1520 | /* |
1521 | * If there are currently no tasks on the cpu pretend there | ||
1522 | * is one of average load so that when a new task gets to | ||
1523 | * run here it will not get delayed by group starvation. | ||
1524 | */ | ||
1525 | weight = tg->cfs_rq[i]->load.weight; | ||
1526 | if (!weight) | ||
1527 | weight = NICE_0_LOAD; | ||
1528 | |||
1529 | tg->cfs_rq[i]->rq_weight = weight; | ||
1530 | rq_weight += weight; | ||
1539 | shares += tg->cfs_rq[i]->shares; | 1531 | shares += tg->cfs_rq[i]->shares; |
1540 | } | 1532 | } |
1541 | 1533 | ||
@@ -1545,9 +1537,6 @@ static int tg_shares_up(struct task_group *tg, void *data) | |||
1545 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) | 1537 | if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE)) |
1546 | shares = tg->shares; | 1538 | shares = tg->shares; |
1547 | 1539 | ||
1548 | if (!rq_weight) | ||
1549 | rq_weight = cpus_weight(sd->span) * NICE_0_LOAD; | ||
1550 | |||
1551 | for_each_cpu_mask(i, sd->span) | 1540 | for_each_cpu_mask(i, sd->span) |
1552 | update_group_shares_cpu(tg, i, shares, rq_weight); | 1541 | update_group_shares_cpu(tg, i, shares, rq_weight); |
1553 | 1542 | ||
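Note: the group-shares math here is shares_i = sd_shares * w_i / sum(w), and the change moves the idle-CPU fixup to the point where the sum is built, so numerators and denominator now use the same weights (which also lets the "+1" fudge in the divisor go away). A worked example under that formula:

    /*
     * sd_shares = 3072, per-cpu weights w = {1024, 2048}, sum(w) = 3072:
     *   cpu0: 3072 * 1024 / 3072 = 1024 shares
     *   cpu1: 3072 * 2048 / 3072 = 2048 shares
     * An idle cpu contributes NICE_0_LOAD instead of 0, so it still gets
     * a share and a task waking there is not starved by its group.
     */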
@@ -1612,6 +1601,39 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1612 | 1601 | ||
1613 | #endif | 1602 | #endif |
1614 | 1603 | ||
1604 | /* | ||
1605 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
1606 | */ | ||
1607 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
1608 | __releases(this_rq->lock) | ||
1609 | __acquires(busiest->lock) | ||
1610 | __acquires(this_rq->lock) | ||
1611 | { | ||
1612 | int ret = 0; | ||
1613 | |||
1614 | if (unlikely(!irqs_disabled())) { | ||
1615 | /* printk() doesn't work well under rq->lock */ ||
1616 | spin_unlock(&this_rq->lock); | ||
1617 | BUG_ON(1); | ||
1618 | } | ||
1619 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
1620 | if (busiest < this_rq) { | ||
1621 | spin_unlock(&this_rq->lock); | ||
1622 | spin_lock(&busiest->lock); | ||
1623 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
1624 | ret = 1; | ||
1625 | } else | ||
1626 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
1627 | } | ||
1628 | return ret; | ||
1629 | } | ||
1630 | |||
1631 | static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
1632 | __releases(busiest->lock) | ||
1633 | { | ||
1634 | spin_unlock(&busiest->lock); | ||
1635 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
1636 | } | ||
1615 | #endif | 1637 | #endif |
1616 | 1638 | ||
1617 | #ifdef CONFIG_FAIR_GROUP_SCHED | 1639 | #ifdef CONFIG_FAIR_GROUP_SCHED |
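Note: these are the same double_lock_balance()/double_unlock_balance() helpers removed from their old spot further down; hoisting them makes them usable earlier in sched.c. The interesting part is the deadlock-avoidance rule they encode, illustrated:

    /*
     * CPU0 holds rq_a->lock and wants rq_b->lock;
     * CPU1 holds rq_b->lock and wants rq_a->lock.
     * Both hit the trylock-failure path, but only the CPU holding the
     * higher-addressed lock drops it and reacquires both in ascending
     * address order, so the cycle cannot persist. ret = 1 tells the
     * caller that this_rq was released and its state must be rechecked.
     */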
@@ -1845,6 +1867,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1845 | 1867 | ||
1846 | clock_offset = old_rq->clock - new_rq->clock; | 1868 | clock_offset = old_rq->clock - new_rq->clock; |
1847 | 1869 | ||
1870 | trace_sched_migrate_task(p, task_cpu(p), new_cpu); | ||
1871 | |||
1848 | #ifdef CONFIG_SCHEDSTATS | 1872 | #ifdef CONFIG_SCHEDSTATS |
1849 | if (p->se.wait_start) | 1873 | if (p->se.wait_start) |
1850 | p->se.wait_start -= clock_offset; | 1874 | p->se.wait_start -= clock_offset; |
@@ -2254,6 +2278,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync) | |||
2254 | 2278 | ||
2255 | smp_wmb(); | 2279 | smp_wmb(); |
2256 | rq = task_rq_lock(p, &flags); | 2280 | rq = task_rq_lock(p, &flags); |
2281 | update_rq_clock(rq); | ||
2257 | old_state = p->state; | 2282 | old_state = p->state; |
2258 | if (!(old_state & state)) | 2283 | if (!(old_state & state)) |
2259 | goto out; | 2284 | goto out; |
@@ -2311,12 +2336,11 @@ out_activate: | |||
2311 | schedstat_inc(p, se.nr_wakeups_local); | 2336 | schedstat_inc(p, se.nr_wakeups_local); |
2312 | else | 2337 | else |
2313 | schedstat_inc(p, se.nr_wakeups_remote); | 2338 | schedstat_inc(p, se.nr_wakeups_remote); |
2314 | update_rq_clock(rq); | ||
2315 | activate_task(rq, p, 1); | 2339 | activate_task(rq, p, 1); |
2316 | success = 1; | 2340 | success = 1; |
2317 | 2341 | ||
2318 | out_running: | 2342 | out_running: |
2319 | trace_sched_wakeup(rq, p); | 2343 | trace_sched_wakeup(rq, p, success); |
2320 | check_preempt_curr(rq, p, sync); | 2344 | check_preempt_curr(rq, p, sync); |
2321 | 2345 | ||
2322 | p->state = TASK_RUNNING; | 2346 | p->state = TASK_RUNNING; |
@@ -2449,7 +2473,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2449 | p->sched_class->task_new(rq, p); | 2473 | p->sched_class->task_new(rq, p); |
2450 | inc_nr_running(rq); | 2474 | inc_nr_running(rq); |
2451 | } | 2475 | } |
2452 | trace_sched_wakeup_new(rq, p); | 2476 | trace_sched_wakeup_new(rq, p, 1); |
2453 | check_preempt_curr(rq, p, 0); | 2477 | check_preempt_curr(rq, p, 0); |
2454 | #ifdef CONFIG_SMP | 2478 | #ifdef CONFIG_SMP |
2455 | if (p->sched_class->task_wake_up) | 2479 | if (p->sched_class->task_wake_up) |
@@ -2812,40 +2836,6 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2) | |||
2812 | } | 2836 | } |
2813 | 2837 | ||
2814 | /* | 2838 | /* |
2815 | * double_lock_balance - lock the busiest runqueue, this_rq is locked already. | ||
2816 | */ | ||
2817 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest) | ||
2818 | __releases(this_rq->lock) | ||
2819 | __acquires(busiest->lock) | ||
2820 | __acquires(this_rq->lock) | ||
2821 | { | ||
2822 | int ret = 0; | ||
2823 | |||
2824 | if (unlikely(!irqs_disabled())) { | ||
2825 | /* printk() doesn't work well under rq->lock */ ||
2826 | spin_unlock(&this_rq->lock); | ||
2827 | BUG_ON(1); | ||
2828 | } | ||
2829 | if (unlikely(!spin_trylock(&busiest->lock))) { | ||
2830 | if (busiest < this_rq) { | ||
2831 | spin_unlock(&this_rq->lock); | ||
2832 | spin_lock(&busiest->lock); | ||
2833 | spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING); | ||
2834 | ret = 1; | ||
2835 | } else | ||
2836 | spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING); | ||
2837 | } | ||
2838 | return ret; | ||
2839 | } | ||
2840 | |||
2841 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest) | ||
2842 | __releases(busiest->lock) | ||
2843 | { | ||
2844 | spin_unlock(&busiest->lock); | ||
2845 | lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_); | ||
2846 | } | ||
2847 | |||
2848 | /* | ||
2849 | * If dest_cpu is allowed for this process, migrate the task to it. | 2839 | * If dest_cpu is allowed for this process, migrate the task to it. |
2850 | * This is accomplished by forcing the cpu_allowed mask to only | 2840 | * This is accomplished by forcing the cpu_allowed mask to only |
2851 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then | 2841 | * allow dest_cpu, which will force the cpu onto dest_cpu. Then |
@@ -2862,7 +2852,6 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu) | |||
2862 | || unlikely(!cpu_active(dest_cpu))) | 2852 | || unlikely(!cpu_active(dest_cpu))) |
2863 | goto out; | 2853 | goto out; |
2864 | 2854 | ||
2865 | trace_sched_migrate_task(rq, p, dest_cpu); | ||
2866 | /* force the process onto the specified CPU */ | 2855 | /* force the process onto the specified CPU */ |
2867 | if (migrate_task(p, dest_cpu, &req)) { | 2856 | if (migrate_task(p, dest_cpu, &req)) { |
2868 | /* Need to wait for migration thread (might exit: take ref). */ | 2857 | /* Need to wait for migration thread (might exit: take ref). */ |
@@ -3707,7 +3696,7 @@ out_balanced: | |||
3707 | static void idle_balance(int this_cpu, struct rq *this_rq) | 3696 | static void idle_balance(int this_cpu, struct rq *this_rq) |
3708 | { | 3697 | { |
3709 | struct sched_domain *sd; | 3698 | struct sched_domain *sd; |
3710 | int pulled_task = -1; | 3699 | int pulled_task = 0; |
3711 | unsigned long next_balance = jiffies + HZ; | 3700 | unsigned long next_balance = jiffies + HZ; |
3712 | cpumask_t tmpmask; | 3701 | cpumask_t tmpmask; |
3713 | 3702 | ||
@@ -5134,6 +5123,22 @@ __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio) | |||
5134 | set_load_weight(p); | 5123 | set_load_weight(p); |
5135 | } | 5124 | } |
5136 | 5125 | ||
5126 | /* | ||
5127 | * check the target process has a UID that matches the current process's | ||
5128 | */ | ||
5129 | static bool check_same_owner(struct task_struct *p) | ||
5130 | { | ||
5131 | const struct cred *cred = current_cred(), *pcred; | ||
5132 | bool match; | ||
5133 | |||
5134 | rcu_read_lock(); | ||
5135 | pcred = __task_cred(p); | ||
5136 | match = (cred->euid == pcred->euid || | ||
5137 | cred->euid == pcred->uid); | ||
5138 | rcu_read_unlock(); | ||
5139 | return match; | ||
5140 | } | ||
5141 | |||
5137 | static int __sched_setscheduler(struct task_struct *p, int policy, | 5142 | static int __sched_setscheduler(struct task_struct *p, int policy, |
5138 | struct sched_param *param, bool user) | 5143 | struct sched_param *param, bool user) |
5139 | { | 5144 | { |
@@ -5193,8 +5198,7 @@ recheck: | |||
5193 | return -EPERM; | 5198 | return -EPERM; |
5194 | 5199 | ||
5195 | /* can't change other user's priorities */ | 5200 | /* can't change other user's priorities */ |
5196 | if ((current->euid != p->euid) && | 5201 | if (!check_same_owner(p)) |
5197 | (current->euid != p->uid)) | ||
5198 | return -EPERM; | 5202 | return -EPERM; |
5199 | } | 5203 | } |
5200 | 5204 | ||
@@ -5426,8 +5430,7 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask) | |||
5426 | read_unlock(&tasklist_lock); | 5430 | read_unlock(&tasklist_lock); |
5427 | 5431 | ||
5428 | retval = -EPERM; | 5432 | retval = -EPERM; |
5429 | if ((current->euid != p->euid) && (current->euid != p->uid) && | 5433 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) |
5430 | !capable(CAP_SYS_NICE)) | ||
5431 | goto out_unlock; | 5434 | goto out_unlock; |
5432 | 5435 | ||
5433 | retval = security_task_setscheduler(p, 0, NULL); | 5436 | retval = security_task_setscheduler(p, 0, NULL); |
@@ -5896,6 +5899,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu) | |||
5896 | * The idle tasks have their own, simple scheduling class: | 5899 | * The idle tasks have their own, simple scheduling class: |
5897 | */ | 5900 | */ |
5898 | idle->sched_class = &idle_sched_class; | 5901 | idle->sched_class = &idle_sched_class; |
5902 | ftrace_graph_init_task(idle); | ||
5899 | } | 5903 | } |
5900 | 5904 | ||
5901 | /* | 5905 | /* |
@@ -6126,7 +6130,6 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu) | |||
6126 | 6130 | ||
6127 | /* | 6131 | /* |
6128 | * Figure out where task on dead CPU should go, use force if necessary. | 6132 | * Figure out where task on dead CPU should go, use force if necessary. |
6129 | * NOTE: interrupts should be disabled by the caller | ||
6130 | */ | 6133 | */ |
6131 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) | 6134 | static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p) |
6132 | { | 6135 | { |
@@ -6587,7 +6590,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
6587 | req = list_entry(rq->migration_queue.next, | 6590 | req = list_entry(rq->migration_queue.next, |
6588 | struct migration_req, list); | 6591 | struct migration_req, list); |
6589 | list_del_init(&req->list); | 6592 | list_del_init(&req->list); |
6593 | spin_unlock_irq(&rq->lock); | ||
6590 | complete(&req->done); | 6594 | complete(&req->done); |
6595 | spin_lock_irq(&rq->lock); | ||
6591 | } | 6596 | } |
6592 | spin_unlock_irq(&rq->lock); | 6597 | spin_unlock_irq(&rq->lock); |
6593 | break; | 6598 | break; |
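The migration_call() change above drops rq->lock around complete(): the completion can wake a task whose wakeup path may itself need a runqueue lock, so running it with rq->lock held risks deadlock and stretches the IRQ-off section. A hedged sketch of the resulting loop shape (assuming the surrounding loop re-tests the queue head after re-locking, which is what makes the unlock/relock window safe):

spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
        req = list_entry(rq->migration_queue.next,
                         struct migration_req, list);
        list_del_init(&req->list);      /* detach before dropping the lock */
        spin_unlock_irq(&rq->lock);     /* never run wakeups under rq->lock */
        complete(&req->done);
        spin_lock_irq(&rq->lock);       /* re-take before touching the list */
}
spin_unlock_irq(&rq->lock);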
@@ -6636,28 +6641,6 @@ early_initcall(migration_init); | |||
6636 | 6641 | ||
6637 | #ifdef CONFIG_SCHED_DEBUG | 6642 | #ifdef CONFIG_SCHED_DEBUG |
6638 | 6643 | ||
6639 | static inline const char *sd_level_to_string(enum sched_domain_level lvl) | ||
6640 | { | ||
6641 | switch (lvl) { | ||
6642 | case SD_LV_NONE: | ||
6643 | return "NONE"; | ||
6644 | case SD_LV_SIBLING: | ||
6645 | return "SIBLING"; | ||
6646 | case SD_LV_MC: | ||
6647 | return "MC"; | ||
6648 | case SD_LV_CPU: | ||
6649 | return "CPU"; | ||
6650 | case SD_LV_NODE: | ||
6651 | return "NODE"; | ||
6652 | case SD_LV_ALLNODES: | ||
6653 | return "ALLNODES"; | ||
6654 | case SD_LV_MAX: | ||
6655 | return "MAX"; | ||
6656 | |||
6657 | } | ||
6658 | return "MAX"; | ||
6659 | } | ||
6660 | |||
6661 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | 6644 | static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, |
6662 | cpumask_t *groupmask) | 6645 | cpumask_t *groupmask) |
6663 | { | 6646 | { |
@@ -6677,8 +6660,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, | |||
6677 | return -1; | 6660 | return -1; |
6678 | } | 6661 | } |
6679 | 6662 | ||
6680 | printk(KERN_CONT "span %s level %s\n", | 6663 | printk(KERN_CONT "span %s level %s\n", str, sd->name); |
6681 | str, sd_level_to_string(sd->level)); | ||
6682 | 6664 | ||
6683 | if (!cpu_isset(cpu, sd->span)) { | 6665 | if (!cpu_isset(cpu, sd->span)) { |
6684 | printk(KERN_ERR "ERROR: domain->span does not contain " | 6666 | printk(KERN_ERR "ERROR: domain->span does not contain " |
@@ -6814,6 +6796,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) | |||
6814 | SD_BALANCE_EXEC | | 6796 | SD_BALANCE_EXEC | |
6815 | SD_SHARE_CPUPOWER | | 6797 | SD_SHARE_CPUPOWER | |
6816 | SD_SHARE_PKG_RESOURCES); | 6798 | SD_SHARE_PKG_RESOURCES); |
6799 | if (nr_node_ids == 1) | ||
6800 | pflags &= ~SD_SERIALIZE; | ||
6817 | } | 6801 | } |
6818 | if (~cflags & pflags) | 6802 | if (~cflags & pflags) |
6819 | return 0; | 6803 | return 0; |
@@ -7334,13 +7318,21 @@ struct allmasks { | |||
7334 | }; | 7318 | }; |
7335 | 7319 | ||
7336 | #if NR_CPUS > 128 | 7320 | #if NR_CPUS > 128 |
7337 | #define SCHED_CPUMASK_ALLOC 1 | 7321 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v |
7338 | #define SCHED_CPUMASK_FREE(v) kfree(v) | 7322 | static inline void sched_cpumask_alloc(struct allmasks **masks) |
7339 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks *v | 7323 | { |
7324 | *masks = kmalloc(sizeof(**masks), GFP_KERNEL); | ||
7325 | } | ||
7326 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7327 | { | ||
7328 | kfree(masks); | ||
7329 | } | ||
7340 | #else | 7330 | #else |
7341 | #define SCHED_CPUMASK_ALLOC 0 | 7331 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v |
7342 | #define SCHED_CPUMASK_FREE(v) | 7332 | static inline void sched_cpumask_alloc(struct allmasks **masks) |
7343 | #define SCHED_CPUMASK_DECLARE(v) struct allmasks _v, *v = &_v | 7333 | { } |
7334 | static inline void sched_cpumask_free(struct allmasks *masks) | ||
7335 | { } | ||
7344 | #endif | 7336 | #endif |
7345 | 7337 | ||
7346 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ | 7338 | #define SCHED_CPUMASK_VAR(v, a) cpumask_t *v = (cpumask_t *) \ |
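Replacing the SCHED_CPUMASK_ALLOC/SCHED_CPUMASK_FREE macro pair with sched_cpumask_alloc()/sched_cpumask_free() lets both configurations share one call site: the small-NR_CPUS variant compiles to nothing because the DECLARE macro already placed the object on the stack. A standalone C sketch of the same stack-versus-heap switch (LARGE_NR_CPUS stands in for the kernel's NR_CPUS > 128 test; all names here are illustrative):

#include <stdlib.h>

struct allmasks { char scratch[512]; };         /* placeholder contents */

#ifdef LARGE_NR_CPUS
#define DECLARE_MASKS(v) struct allmasks *v
static inline void masks_alloc(struct allmasks **m) { *m = malloc(sizeof(**m)); }
static inline void masks_free(struct allmasks *m)   { free(m); }
#else
#define DECLARE_MASKS(v) struct allmasks _v, *v = &_v   /* lives on the stack */
static inline void masks_alloc(struct allmasks **m) { (void)m; /* no-op */ }
static inline void masks_free(struct allmasks *m)   { (void)m; /* no-op */ }
#endif

Callers write masks_alloc(&v); ... masks_free(v); unconditionally, and only the big-NR_CPUS build pays for the heap allocation, which is why the #if/#endif around the kmalloc in __build_sched_domains() can go away below.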
@@ -7416,9 +7408,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7416 | return -ENOMEM; | 7408 | return -ENOMEM; |
7417 | } | 7409 | } |
7418 | 7410 | ||
7419 | #if SCHED_CPUMASK_ALLOC | ||
7420 | /* get space for all scratch cpumask variables */ | 7411 | /* get space for all scratch cpumask variables */ |
7421 | allmasks = kmalloc(sizeof(*allmasks), GFP_KERNEL); | 7412 | sched_cpumask_alloc(&allmasks); |
7422 | if (!allmasks) { | 7413 | if (!allmasks) { |
7423 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); | 7414 | printk(KERN_WARNING "Cannot alloc cpumask array\n"); |
7424 | kfree(rd); | 7415 | kfree(rd); |
@@ -7427,7 +7418,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7427 | #endif | 7418 | #endif |
7428 | return -ENOMEM; | 7419 | return -ENOMEM; |
7429 | } | 7420 | } |
7430 | #endif | 7421 | |
7431 | tmpmask = (cpumask_t *)allmasks; | 7422 | tmpmask = (cpumask_t *)allmasks; |
7432 | 7423 | ||
7433 | 7424 | ||
@@ -7681,13 +7672,13 @@ static int __build_sched_domains(const cpumask_t *cpu_map, | |||
7681 | cpu_attach_domain(sd, rd, i); | 7672 | cpu_attach_domain(sd, rd, i); |
7682 | } | 7673 | } |
7683 | 7674 | ||
7684 | SCHED_CPUMASK_FREE((void *)allmasks); | 7675 | sched_cpumask_free(allmasks); |
7685 | return 0; | 7676 | return 0; |
7686 | 7677 | ||
7687 | #ifdef CONFIG_NUMA | 7678 | #ifdef CONFIG_NUMA |
7688 | error: | 7679 | error: |
7689 | free_sched_groups(cpu_map, tmpmask); | 7680 | free_sched_groups(cpu_map, tmpmask); |
7690 | SCHED_CPUMASK_FREE((void *)allmasks); | 7681 | sched_cpumask_free(allmasks); |
7691 | kfree(rd); | 7682 | kfree(rd); |
7692 | return -ENOMEM; | 7683 | return -ENOMEM; |
7693 | #endif | 7684 | #endif |
@@ -7710,8 +7701,14 @@ static struct sched_domain_attr *dattr_cur; | |||
7710 | */ | 7701 | */ |
7711 | static cpumask_t fallback_doms; | 7702 | static cpumask_t fallback_doms; |
7712 | 7703 | ||
7713 | void __attribute__((weak)) arch_update_cpu_topology(void) | 7704 | /* |
7705 | * arch_update_cpu_topology lets virtualized architectures update the | ||
7706 | * cpu core maps. It is supposed to return 1 if the topology changed | ||
7707 | * or 0 if it stayed the same. | ||
7708 | */ | ||
7709 | int __attribute__((weak)) arch_update_cpu_topology(void) | ||
7714 | { | 7710 | { |
7711 | return 0; | ||
7715 | } | 7712 | } |
7716 | 7713 | ||
7717 | /* | 7714 | /* |
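arch_update_cpu_topology() is now a weak function with a meaningful return value: 0 means the core maps are unchanged, 1 means an architecture (typically a virtualized one) recomputed them. Any port overrides the default simply by defining a strong symbol of the same name. A minimal two-file demo of the weak-symbol idiom as GCC/ELF implements it:

/* default.c: generic fallback, used when no override is linked in */
int __attribute__((weak)) arch_update_cpu_topology(void)
{
        return 0;               /* topology unchanged */
}

/* arch.c: a strong definition with the same name wins at link time */
int arch_update_cpu_topology(void)
{
        return 1;               /* this port rebuilt its cpu core maps */
}

The return value feeds the new_topology flag in partition_sched_domains() below, which disables the "reuse a matching old domain" shortcut whenever the topology really changed.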
@@ -7751,8 +7748,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map) | |||
7751 | cpumask_t tmpmask; | 7748 | cpumask_t tmpmask; |
7752 | int i; | 7749 | int i; |
7753 | 7750 | ||
7754 | unregister_sched_domain_sysctl(); | ||
7755 | |||
7756 | for_each_cpu_mask_nr(i, *cpu_map) | 7751 | for_each_cpu_mask_nr(i, *cpu_map) |
7757 | cpu_attach_domain(NULL, &def_root_domain, i); | 7752 | cpu_attach_domain(NULL, &def_root_domain, i); |
7758 | synchronize_sched(); | 7753 | synchronize_sched(); |
@@ -7805,17 +7800,21 @@ void partition_sched_domains(int ndoms_new, cpumask_t *doms_new, | |||
7805 | struct sched_domain_attr *dattr_new) | 7800 | struct sched_domain_attr *dattr_new) |
7806 | { | 7801 | { |
7807 | int i, j, n; | 7802 | int i, j, n; |
7803 | int new_topology; | ||
7808 | 7804 | ||
7809 | mutex_lock(&sched_domains_mutex); | 7805 | mutex_lock(&sched_domains_mutex); |
7810 | 7806 | ||
7811 | /* always unregister in case we don't destroy any domains */ | 7807 | /* always unregister in case we don't destroy any domains */ |
7812 | unregister_sched_domain_sysctl(); | 7808 | unregister_sched_domain_sysctl(); |
7813 | 7809 | ||
7810 | /* Let architecture update cpu core mappings. */ | ||
7811 | new_topology = arch_update_cpu_topology(); | ||
7812 | |||
7814 | n = doms_new ? ndoms_new : 0; | 7813 | n = doms_new ? ndoms_new : 0; |
7815 | 7814 | ||
7816 | /* Destroy deleted domains */ | 7815 | /* Destroy deleted domains */ |
7817 | for (i = 0; i < ndoms_cur; i++) { | 7816 | for (i = 0; i < ndoms_cur; i++) { |
7818 | for (j = 0; j < n; j++) { | 7817 | for (j = 0; j < n && !new_topology; j++) { |
7819 | if (cpus_equal(doms_cur[i], doms_new[j]) | 7818 | if (cpus_equal(doms_cur[i], doms_new[j]) |
7820 | && dattrs_equal(dattr_cur, i, dattr_new, j)) | 7819 | && dattrs_equal(dattr_cur, i, dattr_new, j)) |
7821 | goto match1; | 7820 | goto match1; |
@@ -7830,12 +7829,12 @@ match1: | |||
7830 | ndoms_cur = 0; | 7829 | ndoms_cur = 0; |
7831 | doms_new = &fallback_doms; | 7830 | doms_new = &fallback_doms; |
7832 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); | 7831 | cpus_andnot(doms_new[0], cpu_online_map, cpu_isolated_map); |
7833 | dattr_new = NULL; | 7832 | WARN_ON_ONCE(dattr_new); |
7834 | } | 7833 | } |
7835 | 7834 | ||
7836 | /* Build new domains */ | 7835 | /* Build new domains */ |
7837 | for (i = 0; i < ndoms_new; i++) { | 7836 | for (i = 0; i < ndoms_new; i++) { |
7838 | for (j = 0; j < ndoms_cur; j++) { | 7837 | for (j = 0; j < ndoms_cur && !new_topology; j++) { |
7839 | if (cpus_equal(doms_new[i], doms_cur[j]) | 7838 | if (cpus_equal(doms_new[i], doms_cur[j]) |
7840 | && dattrs_equal(dattr_new, i, dattr_cur, j)) | 7839 | && dattrs_equal(dattr_new, i, dattr_cur, j)) |
7841 | goto match2; | 7840 | goto match2; |
@@ -8490,7 +8489,7 @@ static | |||
8490 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | 8489 | int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) |
8491 | { | 8490 | { |
8492 | struct cfs_rq *cfs_rq; | 8491 | struct cfs_rq *cfs_rq; |
8493 | struct sched_entity *se, *parent_se; | 8492 | struct sched_entity *se; |
8494 | struct rq *rq; | 8493 | struct rq *rq; |
8495 | int i; | 8494 | int i; |
8496 | 8495 | ||
@@ -8506,18 +8505,17 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) | |||
8506 | for_each_possible_cpu(i) { | 8505 | for_each_possible_cpu(i) { |
8507 | rq = cpu_rq(i); | 8506 | rq = cpu_rq(i); |
8508 | 8507 | ||
8509 | cfs_rq = kmalloc_node(sizeof(struct cfs_rq), | 8508 | cfs_rq = kzalloc_node(sizeof(struct cfs_rq), |
8510 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8509 | GFP_KERNEL, cpu_to_node(i)); |
8511 | if (!cfs_rq) | 8510 | if (!cfs_rq) |
8512 | goto err; | 8511 | goto err; |
8513 | 8512 | ||
8514 | se = kmalloc_node(sizeof(struct sched_entity), | 8513 | se = kzalloc_node(sizeof(struct sched_entity), |
8515 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8514 | GFP_KERNEL, cpu_to_node(i)); |
8516 | if (!se) | 8515 | if (!se) |
8517 | goto err; | 8516 | goto err; |
8518 | 8517 | ||
8519 | parent_se = parent ? parent->se[i] : NULL; | 8518 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]); |
8520 | init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent_se); | ||
8521 | } | 8519 | } |
8522 | 8520 | ||
8523 | return 1; | 8521 | return 1; |
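The allocation changes above are mechanical: kzalloc_node(size, flags, node) is the canonical spelling of kmalloc_node(size, flags | __GFP_ZERO, node). Side by side:

/* before: zeroing requested via an explicit GFP flag */
cfs_rq = kmalloc_node(sizeof(struct cfs_rq),
                      GFP_KERNEL | __GFP_ZERO, cpu_to_node(i));

/* after: same zeroed, NUMA-local allocation, clearer intent */
cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
                      GFP_KERNEL, cpu_to_node(i));

Note the hunk also drops the parent_se NULL fallback, evidently relying on every caller now passing a non-NULL parent group.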
@@ -8578,7 +8576,7 @@ static | |||
8578 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | 8576 | int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) |
8579 | { | 8577 | { |
8580 | struct rt_rq *rt_rq; | 8578 | struct rt_rq *rt_rq; |
8581 | struct sched_rt_entity *rt_se, *parent_se; | 8579 | struct sched_rt_entity *rt_se; |
8582 | struct rq *rq; | 8580 | struct rq *rq; |
8583 | int i; | 8581 | int i; |
8584 | 8582 | ||
@@ -8595,18 +8593,17 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) | |||
8595 | for_each_possible_cpu(i) { | 8593 | for_each_possible_cpu(i) { |
8596 | rq = cpu_rq(i); | 8594 | rq = cpu_rq(i); |
8597 | 8595 | ||
8598 | rt_rq = kmalloc_node(sizeof(struct rt_rq), | 8596 | rt_rq = kzalloc_node(sizeof(struct rt_rq), |
8599 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8597 | GFP_KERNEL, cpu_to_node(i)); |
8600 | if (!rt_rq) | 8598 | if (!rt_rq) |
8601 | goto err; | 8599 | goto err; |
8602 | 8600 | ||
8603 | rt_se = kmalloc_node(sizeof(struct sched_rt_entity), | 8601 | rt_se = kzalloc_node(sizeof(struct sched_rt_entity), |
8604 | GFP_KERNEL|__GFP_ZERO, cpu_to_node(i)); | 8602 | GFP_KERNEL, cpu_to_node(i)); |
8605 | if (!rt_se) | 8603 | if (!rt_se) |
8606 | goto err; | 8604 | goto err; |
8607 | 8605 | ||
8608 | parent_se = parent ? parent->rt_se[i] : NULL; | 8606 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]); |
8609 | init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent_se); | ||
8610 | } | 8607 | } |
8611 | 8608 | ||
8612 | return 1; | 8609 | return 1; |
@@ -9249,11 +9246,12 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
9249 | * (balbir@in.ibm.com). | 9246 | * (balbir@in.ibm.com). |
9250 | */ | 9247 | */ |
9251 | 9248 | ||
9252 | /* track cpu usage of a group of tasks */ | 9249 | /* track cpu usage of a group of tasks and its child groups */ |
9253 | struct cpuacct { | 9250 | struct cpuacct { |
9254 | struct cgroup_subsys_state css; | 9251 | struct cgroup_subsys_state css; |
9255 | /* cpuusage holds pointer to a u64-type object on every cpu */ | 9252 | /* cpuusage holds pointer to a u64-type object on every cpu */ |
9256 | u64 *cpuusage; | 9253 | u64 *cpuusage; |
9254 | struct cpuacct *parent; | ||
9257 | }; | 9255 | }; |
9258 | 9256 | ||
9259 | struct cgroup_subsys cpuacct_subsys; | 9257 | struct cgroup_subsys cpuacct_subsys; |
@@ -9287,6 +9285,9 @@ static struct cgroup_subsys_state *cpuacct_create( | |||
9287 | return ERR_PTR(-ENOMEM); | 9285 | return ERR_PTR(-ENOMEM); |
9288 | } | 9286 | } |
9289 | 9287 | ||
9288 | if (cgrp->parent) | ||
9289 | ca->parent = cgroup_ca(cgrp->parent); | ||
9290 | |||
9290 | return &ca->css; | 9291 | return &ca->css; |
9291 | } | 9292 | } |
9292 | 9293 | ||
@@ -9300,6 +9301,41 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9300 | kfree(ca); | 9301 | kfree(ca); |
9301 | } | 9302 | } |
9302 | 9303 | ||
9304 | static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) | ||
9305 | { | ||
9306 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9307 | u64 data; | ||
9308 | |||
9309 | #ifndef CONFIG_64BIT | ||
9310 | /* | ||
9311 | * Take rq->lock to make 64-bit read safe on 32-bit platforms. | ||
9312 | */ | ||
9313 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9314 | data = *cpuusage; | ||
9315 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9316 | #else | ||
9317 | data = *cpuusage; | ||
9318 | #endif | ||
9319 | |||
9320 | return data; | ||
9321 | } | ||
9322 | |||
9323 | static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) | ||
9324 | { | ||
9325 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9326 | |||
9327 | #ifndef CONFIG_64BIT | ||
9328 | /* | ||
9329 | * Take rq->lock to make 64-bit write safe on 32-bit platforms. | ||
9330 | */ | ||
9331 | spin_lock_irq(&cpu_rq(cpu)->lock); | ||
9332 | *cpuusage = val; | ||
9333 | spin_unlock_irq(&cpu_rq(cpu)->lock); | ||
9334 | #else | ||
9335 | *cpuusage = val; | ||
9336 | #endif | ||
9337 | } | ||
9338 | |||
9303 | /* return total cpu usage (in nanoseconds) of a group */ | 9339 | /* return total cpu usage (in nanoseconds) of a group */ |
9304 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | 9340 | static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) |
9305 | { | 9341 | { |
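cpuacct_cpuusage_read()/write() exist because a u64 access is not atomic on 32-bit machines: without the rq->lock bracket, a reader could see the upper and lower halves of the counter from two different updates. On 64-bit the #else branch is a single load or store. A hedged usage sketch matching cpuusage_read() below, summing the per-CPU counters safely:

u64 total = 0;
int cpu;

for_each_present_cpu(cpu)
        total += cpuacct_cpuusage_read(ca, cpu); /* locked only on 32-bit */

Switching the iterators from for_each_possible_cpu to for_each_present_cpu also stops the code touching per-CPU slots for CPUs that are not currently present in the system.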
@@ -9307,17 +9343,8 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft) | |||
9307 | u64 totalcpuusage = 0; | 9343 | u64 totalcpuusage = 0; |
9308 | int i; | 9344 | int i; |
9309 | 9345 | ||
9310 | for_each_possible_cpu(i) { | 9346 | for_each_present_cpu(i) |
9311 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9347 | totalcpuusage += cpuacct_cpuusage_read(ca, i); |
9312 | |||
9313 | /* | ||
9314 | * Take rq->lock to make 64-bit addition safe on 32-bit | ||
9315 | * platforms. | ||
9316 | */ | ||
9317 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9318 | totalcpuusage += *cpuusage; | ||
9319 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9320 | } | ||
9321 | 9348 | ||
9322 | return totalcpuusage; | 9349 | return totalcpuusage; |
9323 | } | 9350 | } |
@@ -9334,23 +9361,39 @@ static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype, | |||
9334 | goto out; | 9361 | goto out; |
9335 | } | 9362 | } |
9336 | 9363 | ||
9337 | for_each_possible_cpu(i) { | 9364 | for_each_present_cpu(i) |
9338 | u64 *cpuusage = percpu_ptr(ca->cpuusage, i); | 9365 | cpuacct_cpuusage_write(ca, i, 0); |
9339 | 9366 | ||
9340 | spin_lock_irq(&cpu_rq(i)->lock); | ||
9341 | *cpuusage = 0; | ||
9342 | spin_unlock_irq(&cpu_rq(i)->lock); | ||
9343 | } | ||
9344 | out: | 9367 | out: |
9345 | return err; | 9368 | return err; |
9346 | } | 9369 | } |
9347 | 9370 | ||
9371 | static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft, | ||
9372 | struct seq_file *m) | ||
9373 | { | ||
9374 | struct cpuacct *ca = cgroup_ca(cgroup); | ||
9375 | u64 percpu; | ||
9376 | int i; | ||
9377 | |||
9378 | for_each_present_cpu(i) { | ||
9379 | percpu = cpuacct_cpuusage_read(ca, i); | ||
9380 | seq_printf(m, "%llu ", (unsigned long long) percpu); | ||
9381 | } | ||
9382 | seq_printf(m, "\n"); | ||
9383 | return 0; | ||
9384 | } | ||
9385 | |||
9348 | static struct cftype files[] = { | 9386 | static struct cftype files[] = { |
9349 | { | 9387 | { |
9350 | .name = "usage", | 9388 | .name = "usage", |
9351 | .read_u64 = cpuusage_read, | 9389 | .read_u64 = cpuusage_read, |
9352 | .write_u64 = cpuusage_write, | 9390 | .write_u64 = cpuusage_write, |
9353 | }, | 9391 | }, |
9392 | { | ||
9393 | .name = "usage_percpu", | ||
9394 | .read_seq_string = cpuacct_percpu_seq_read, | ||
9395 | }, | ||
9396 | |||
9354 | }; | 9397 | }; |
9355 | 9398 | ||
9356 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | 9399 | static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) |
@@ -9366,14 +9409,16 @@ static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
9366 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) | 9409 | static void cpuacct_charge(struct task_struct *tsk, u64 cputime) |
9367 | { | 9410 | { |
9368 | struct cpuacct *ca; | 9411 | struct cpuacct *ca; |
9412 | int cpu; | ||
9369 | 9413 | ||
9370 | if (!cpuacct_subsys.active) | 9414 | if (!cpuacct_subsys.active) |
9371 | return; | 9415 | return; |
9372 | 9416 | ||
9417 | cpu = task_cpu(tsk); | ||
9373 | ca = task_ca(tsk); | 9418 | ca = task_ca(tsk); |
9374 | if (ca) { | ||
9375 | u64 *cpuusage = percpu_ptr(ca->cpuusage, task_cpu(tsk)); | ||
9376 | 9419 | ||
9420 | for (; ca; ca = ca->parent) { | ||
9421 | u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); | ||
9377 | *cpuusage += cputime; | 9422 | *cpuusage += cputime; |
9378 | } | 9423 | } |
9379 | } | 9424 | } |
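With the new parent pointer, cpuacct_charge() walks from the task's group to the root, so a tick charged to a child is visible in every ancestor's usage file. A self-contained C model of that hierarchical charge:

struct group {
        struct group *parent;           /* NULL at the root */
        unsigned long long usage;
};

static void charge(struct group *g, unsigned long long delta)
{
        for (; g; g = g->parent)
                g->usage += delta;      /* child and all ancestors accumulate */
}

The cost is one extra addition per level of cgroup nesting on every charge.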
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 81787248b60f..e8ab096ddfe3 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c | |||
@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) | |||
118 | 118 | ||
119 | /* | 119 | /* |
120 | * scd->clock = clamp(scd->tick_gtod + delta, | 120 | * scd->clock = clamp(scd->tick_gtod + delta, |
121 | * max(scd->tick_gtod, scd->clock), | 121 | * max(scd->tick_gtod, scd->clock), |
122 | * max(scd->clock, scd->tick_gtod + TICK_NSEC)); | 122 | * scd->tick_gtod + TICK_NSEC); |
123 | */ | 123 | */ |
124 | 124 | ||
125 | clock = scd->tick_gtod + delta; | 125 | clock = scd->tick_gtod + delta; |
126 | min_clock = wrap_max(scd->tick_gtod, scd->clock); | 126 | min_clock = wrap_max(scd->tick_gtod, scd->clock); |
127 | max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC); | 127 | max_clock = scd->tick_gtod + TICK_NSEC; |
128 | 128 | ||
129 | clock = wrap_max(clock, min_clock); | 129 | clock = wrap_max(clock, min_clock); |
130 | clock = wrap_min(clock, max_clock); | 130 | clock = wrap_min(clock, max_clock); |
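The sched_clock fix makes the comment and the code agree. The clamp is now scd->clock = clamp(scd->tick_gtod + delta, max(scd->tick_gtod, scd->clock), scd->tick_gtod + TICK_NSEC): the per-CPU clock may never go backwards and may run at most one tick ahead of the GTOD base, whereas previously a large scd->clock could push max_clock arbitrarily far ahead. The wrap_max()/wrap_min() helpers compare via a signed difference so the clamp stays correct across u64 wraparound; a standalone sketch of those helpers as defined earlier in this file:

typedef unsigned long long u64;
typedef long long s64;

static inline u64 wrap_max(u64 x, u64 y)
{
        return (s64)(x - y) > 0 ? x : y;   /* signed diff survives u64 wrap */
}

static inline u64 wrap_min(u64 x, u64 y)
{
        return (s64)(x - y) < 0 ? x : y;
}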
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index 26ed8e3d1c15..4293cfa9681d 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -53,6 +53,40 @@ static unsigned long nsec_low(unsigned long long nsec) | |||
53 | 53 | ||
54 | #define SPLIT_NS(x) nsec_high(x), nsec_low(x) | 54 | #define SPLIT_NS(x) nsec_high(x), nsec_low(x) |
55 | 55 | ||
56 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
57 | static void print_cfs_group_stats(struct seq_file *m, int cpu, | ||
58 | struct task_group *tg) | ||
59 | { | ||
60 | struct sched_entity *se = tg->se[cpu]; | ||
61 | if (!se) | ||
62 | return; | ||
63 | |||
64 | #define P(F) \ | ||
65 | SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F) | ||
66 | #define PN(F) \ | ||
67 | SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F)) | ||
68 | |||
69 | PN(se->exec_start); | ||
70 | PN(se->vruntime); | ||
71 | PN(se->sum_exec_runtime); | ||
72 | #ifdef CONFIG_SCHEDSTATS | ||
73 | PN(se->wait_start); | ||
74 | PN(se->sleep_start); | ||
75 | PN(se->block_start); | ||
76 | PN(se->sleep_max); | ||
77 | PN(se->block_max); | ||
78 | PN(se->exec_max); | ||
79 | PN(se->slice_max); | ||
80 | PN(se->wait_max); | ||
81 | PN(se->wait_sum); | ||
82 | P(se->wait_count); | ||
83 | #endif | ||
84 | P(se->load.weight); | ||
85 | #undef PN | ||
86 | #undef P | ||
87 | } | ||
88 | #endif | ||
89 | |||
56 | static void | 90 | static void |
57 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) | 91 | print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) |
58 | { | 92 | { |
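print_cfs_group_stats() leans on the preprocessor's stringize operator: P(F) and PN(F) print each field with its own source-level name, so adding a statistic is a one-line change. A compilable demo of the trick:

#include <stdio.h>

#define P(F) printf("  .%-30s: %lld\n", #F, (long long)(F))

int main(void)
{
        long long weight = 1024;
        P(weight);              /* prints "  .weight ... : 1024" */
        return 0;
}

The #undef at the end of the real function keeps the short macro names from leaking into the rest of the file.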
@@ -121,20 +155,19 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
121 | 155 | ||
122 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) | 156 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) |
123 | char path[128] = ""; | 157 | char path[128] = ""; |
124 | struct cgroup *cgroup = NULL; | ||
125 | struct task_group *tg = cfs_rq->tg; | 158 | struct task_group *tg = cfs_rq->tg; |
126 | 159 | ||
127 | if (tg) | 160 | cgroup_path(tg->css.cgroup, path, sizeof(path)); |
128 | cgroup = tg->css.cgroup; | ||
129 | |||
130 | if (cgroup) | ||
131 | cgroup_path(cgroup, path, sizeof(path)); | ||
132 | 161 | ||
133 | SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); | 162 | SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, path); |
163 | #elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED) | ||
164 | { | ||
165 | uid_t uid = cfs_rq->tg->uid; | ||
166 | SEQ_printf(m, "\ncfs_rq[%d] for UID: %u\n", cpu, uid); | ||
167 | } | ||
134 | #else | 168 | #else |
135 | SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); | 169 | SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu); |
136 | #endif | 170 | #endif |
137 | |||
138 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", | 171 | SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", |
139 | SPLIT_NS(cfs_rq->exec_clock)); | 172 | SPLIT_NS(cfs_rq->exec_clock)); |
140 | 173 | ||
@@ -168,6 +201,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) | |||
168 | #ifdef CONFIG_SMP | 201 | #ifdef CONFIG_SMP |
169 | SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares); | 202 | SEQ_printf(m, " .%-30s: %lu\n", "shares", cfs_rq->shares); |
170 | #endif | 203 | #endif |
204 | print_cfs_group_stats(m, cpu, cfs_rq->tg); | ||
171 | #endif | 205 | #endif |
172 | } | 206 | } |
173 | 207 | ||
@@ -175,14 +209,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) | |||
175 | { | 209 | { |
176 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED) | 210 | #if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED) |
177 | char path[128] = ""; | 211 | char path[128] = ""; |
178 | struct cgroup *cgroup = NULL; | ||
179 | struct task_group *tg = rt_rq->tg; | 212 | struct task_group *tg = rt_rq->tg; |
180 | 213 | ||
181 | if (tg) | 214 | cgroup_path(tg->css.cgroup, path, sizeof(path)); |
182 | cgroup = tg->css.cgroup; | ||
183 | |||
184 | if (cgroup) | ||
185 | cgroup_path(cgroup, path, sizeof(path)); | ||
186 | 215 | ||
187 | SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path); | 216 | SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, path); |
188 | #else | 217 | #else |
@@ -272,7 +301,7 @@ static int sched_debug_show(struct seq_file *m, void *v) | |||
272 | u64 now = ktime_to_ns(ktime_get()); | 301 | u64 now = ktime_to_ns(ktime_get()); |
273 | int cpu; | 302 | int cpu; |
274 | 303 | ||
275 | SEQ_printf(m, "Sched Debug Version: v0.07, %s %.*s\n", | 304 | SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n", |
276 | init_utsname()->release, | 305 | init_utsname()->release, |
277 | (int)strcspn(init_utsname()->version, " "), | 306 | (int)strcspn(init_utsname()->version, " "), |
278 | init_utsname()->version); | 307 | init_utsname()->version); |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 98345e45b059..5ad4440f0fc4 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -492,6 +492,8 @@ static void update_curr(struct cfs_rq *cfs_rq) | |||
492 | * overflow on 32 bits): | 492 | * overflow on 32 bits): |
493 | */ | 493 | */ |
494 | delta_exec = (unsigned long)(now - curr->exec_start); | 494 | delta_exec = (unsigned long)(now - curr->exec_start); |
495 | if (!delta_exec) | ||
496 | return; | ||
495 | 497 | ||
496 | __update_curr(cfs_rq, curr, delta_exec); | 498 | __update_curr(cfs_rq, curr, delta_exec); |
497 | curr->exec_start = now; | 499 | curr->exec_start = now; |
@@ -1345,12 +1347,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) | |||
1345 | { | 1347 | { |
1346 | struct task_struct *curr = rq->curr; | 1348 | struct task_struct *curr = rq->curr; |
1347 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1349 | struct sched_entity *se = &curr->se, *pse = &p->se; |
1350 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | ||
1348 | 1351 | ||
1349 | if (unlikely(rt_prio(p->prio))) { | 1352 | update_curr(cfs_rq); |
1350 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | ||
1351 | 1353 | ||
1352 | update_rq_clock(rq); | 1354 | if (unlikely(rt_prio(p->prio))) { |
1353 | update_curr(cfs_rq); | ||
1354 | resched_task(curr); | 1355 | resched_task(curr); |
1355 | return; | 1356 | return; |
1356 | } | 1357 | } |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index d9ba9d5f99d6..51d2af3e6191 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -77,7 +77,7 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | #define for_each_leaf_rt_rq(rt_rq, rq) \ | 79 | #define for_each_leaf_rt_rq(rt_rq, rq) \ |
80 | list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) | 80 | list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) |
81 | 81 | ||
82 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) | 82 | static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) |
83 | { | 83 | { |
@@ -537,13 +537,13 @@ static void update_curr_rt(struct rq *rq) | |||
537 | for_each_sched_rt_entity(rt_se) { | 537 | for_each_sched_rt_entity(rt_se) { |
538 | rt_rq = rt_rq_of_se(rt_se); | 538 | rt_rq = rt_rq_of_se(rt_se); |
539 | 539 | ||
540 | spin_lock(&rt_rq->rt_runtime_lock); | ||
541 | if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { | 540 | if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { |
541 | spin_lock(&rt_rq->rt_runtime_lock); | ||
542 | rt_rq->rt_time += delta_exec; | 542 | rt_rq->rt_time += delta_exec; |
543 | if (sched_rt_runtime_exceeded(rt_rq)) | 543 | if (sched_rt_runtime_exceeded(rt_rq)) |
544 | resched_task(curr); | 544 | resched_task(curr); |
545 | spin_unlock(&rt_rq->rt_runtime_lock); | ||
545 | } | 546 | } |
546 | spin_unlock(&rt_rq->rt_runtime_lock); | ||
547 | } | 547 | } |
548 | } | 548 | } |
549 | 549 | ||
@@ -909,9 +909,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p) | |||
909 | /* Only try algorithms three times */ | 909 | /* Only try algorithms three times */ |
910 | #define RT_MAX_TRIES 3 | 910 | #define RT_MAX_TRIES 3 |
911 | 911 | ||
912 | static int double_lock_balance(struct rq *this_rq, struct rq *busiest); | ||
913 | static void double_unlock_balance(struct rq *this_rq, struct rq *busiest); | ||
914 | |||
915 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); | 912 | static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); |
916 | 913 | ||
917 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) | 914 | static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) |
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h index 7dbf72a2b02c..3b01098164c8 100644 --- a/kernel/sched_stats.h +++ b/kernel/sched_stats.h | |||
@@ -31,7 +31,7 @@ static int show_schedstat(struct seq_file *seq, void *v) | |||
31 | rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, | 31 | rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, |
32 | rq->sched_switch, rq->sched_count, rq->sched_goidle, | 32 | rq->sched_switch, rq->sched_count, rq->sched_goidle, |
33 | rq->ttwu_count, rq->ttwu_local, | 33 | rq->ttwu_count, rq->ttwu_local, |
34 | rq->rq_sched_info.cpu_time, | 34 | rq->rq_cpu_time, |
35 | rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); | 35 | rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); |
36 | 36 | ||
37 | seq_printf(seq, "\n"); | 37 | seq_printf(seq, "\n"); |
@@ -123,7 +123,7 @@ static inline void | |||
123 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) | 123 | rq_sched_info_depart(struct rq *rq, unsigned long long delta) |
124 | { | 124 | { |
125 | if (rq) | 125 | if (rq) |
126 | rq->rq_sched_info.cpu_time += delta; | 126 | rq->rq_cpu_time += delta; |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline void | 129 | static inline void |
@@ -236,7 +236,6 @@ static inline void sched_info_depart(struct task_struct *t) | |||
236 | unsigned long long delta = task_rq(t)->clock - | 236 | unsigned long long delta = task_rq(t)->clock - |
237 | t->sched_info.last_arrival; | 237 | t->sched_info.last_arrival; |
238 | 238 | ||
239 | t->sched_info.cpu_time += delta; | ||
240 | rq_sched_info_depart(task_rq(t), delta); | 239 | rq_sched_info_depart(task_rq(t), delta); |
241 | 240 | ||
242 | if (t->state == TASK_RUNNING) | 241 | if (t->state == TASK_RUNNING) |
diff --git a/kernel/signal.c b/kernel/signal.c index 4530fc654455..8e95855ff3cf 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -41,6 +41,8 @@ | |||
41 | 41 | ||
42 | static struct kmem_cache *sigqueue_cachep; | 42 | static struct kmem_cache *sigqueue_cachep; |
43 | 43 | ||
44 | DEFINE_TRACE(sched_signal_send); | ||
45 | |||
44 | static void __user *sig_handler(struct task_struct *t, int sig) | 46 | static void __user *sig_handler(struct task_struct *t, int sig) |
45 | { | 47 | { |
46 | return t->sighand->action[sig - 1].sa.sa_handler; | 48 | return t->sighand->action[sig - 1].sa.sa_handler; |
@@ -177,6 +179,11 @@ int next_signal(struct sigpending *pending, sigset_t *mask) | |||
177 | return sig; | 179 | return sig; |
178 | } | 180 | } |
179 | 181 | ||
182 | /* | ||
183 | * allocate a new signal queue record | ||
184 | * - this may be called without locks if and only if t == current, otherwise an | ||
185 | * appopriate lock must be held to stop the target task from exiting | ||
186 | */ | ||
180 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | 187 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, |
181 | int override_rlimit) | 188 | int override_rlimit) |
182 | { | 189 | { |
@@ -184,11 +191,12 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | |||
184 | struct user_struct *user; | 191 | struct user_struct *user; |
185 | 192 | ||
186 | /* | 193 | /* |
187 | * In order to avoid problems with "switch_user()", we want to make | 194 | * We won't get problems with the target's UID changing under us |
188 | * sure that the compiler doesn't re-load "t->user" | 195 | * because changing it requires RCU be used, and if t != current, the |
196 | * caller must be holding the RCU read lock (by way of a spinlock) and | ||
197 | * we use RCU protection here | ||
189 | */ | 198 | */ |
190 | user = t->user; | 199 | user = get_uid(__task_cred(t)->user); |
191 | barrier(); | ||
192 | atomic_inc(&user->sigpending); | 200 | atomic_inc(&user->sigpending); |
193 | if (override_rlimit || | 201 | if (override_rlimit || |
194 | atomic_read(&user->sigpending) <= | 202 | atomic_read(&user->sigpending) <= |
@@ -196,12 +204,14 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | |||
196 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 204 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
197 | if (unlikely(q == NULL)) { | 205 | if (unlikely(q == NULL)) { |
198 | atomic_dec(&user->sigpending); | 206 | atomic_dec(&user->sigpending); |
207 | free_uid(user); | ||
199 | } else { | 208 | } else { |
200 | INIT_LIST_HEAD(&q->list); | 209 | INIT_LIST_HEAD(&q->list); |
201 | q->flags = 0; | 210 | q->flags = 0; |
202 | q->user = get_uid(user); | 211 | q->user = user; |
203 | } | 212 | } |
204 | return(q); | 213 | |
214 | return q; | ||
205 | } | 215 | } |
206 | 216 | ||
207 | static void __sigqueue_free(struct sigqueue *q) | 217 | static void __sigqueue_free(struct sigqueue *q) |
@@ -562,10 +572,12 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s) | |||
562 | 572 | ||
563 | /* | 573 | /* |
564 | * Bad permissions for sending the signal | 574 | * Bad permissions for sending the signal |
575 | * - the caller must hold at least the RCU read lock | ||
565 | */ | 576 | */ |
566 | static int check_kill_permission(int sig, struct siginfo *info, | 577 | static int check_kill_permission(int sig, struct siginfo *info, |
567 | struct task_struct *t) | 578 | struct task_struct *t) |
568 | { | 579 | { |
580 | const struct cred *cred = current_cred(), *tcred; | ||
569 | struct pid *sid; | 581 | struct pid *sid; |
570 | int error; | 582 | int error; |
571 | 583 | ||
@@ -579,8 +591,11 @@ static int check_kill_permission(int sig, struct siginfo *info, | |||
579 | if (error) | 591 | if (error) |
580 | return error; | 592 | return error; |
581 | 593 | ||
582 | if ((current->euid ^ t->suid) && (current->euid ^ t->uid) && | 594 | tcred = __task_cred(t); |
583 | (current->uid ^ t->suid) && (current->uid ^ t->uid) && | 595 | if ((cred->euid ^ tcred->suid) && |
596 | (cred->euid ^ tcred->uid) && | ||
597 | (cred->uid ^ tcred->suid) && | ||
598 | (cred->uid ^ tcred->uid) && | ||
584 | !capable(CAP_KILL)) { | 599 | !capable(CAP_KILL)) { |
585 | switch (sig) { | 600 | switch (sig) { |
586 | case SIGCONT: | 601 | case SIGCONT: |
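The rewritten permission test in check_kill_permission() keeps the kernel's XOR idiom: a ^ b is zero exactly when a == b, so each (x ^ y) term used in a boolean context reads as "x differs from y", and the whole conjunction means "no UID pairing matches". A tiny compilable illustration:

#include <assert.h>

static int differs(unsigned int a, unsigned int b)
{
        return (a ^ b) != 0;    /* XOR is zero only for equal values */
}

int main(void)
{
        assert(differs(1000, 1001));
        assert(!differs(42, 42));
        return 0;
}

Functionally it is identical to writing !=; the XOR spelling is a long-standing habit in this file, preserved by the conversion to tcred.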
@@ -844,7 +859,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
844 | q->info.si_errno = 0; | 859 | q->info.si_errno = 0; |
845 | q->info.si_code = SI_USER; | 860 | q->info.si_code = SI_USER; |
846 | q->info.si_pid = task_pid_vnr(current); | 861 | q->info.si_pid = task_pid_vnr(current); |
847 | q->info.si_uid = current->uid; | 862 | q->info.si_uid = current_uid(); |
848 | break; | 863 | break; |
849 | case (unsigned long) SEND_SIG_PRIV: | 864 | case (unsigned long) SEND_SIG_PRIV: |
850 | q->info.si_signo = sig; | 865 | q->info.si_signo = sig; |
@@ -1008,6 +1023,10 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long | |||
1008 | return sighand; | 1023 | return sighand; |
1009 | } | 1024 | } |
1010 | 1025 | ||
1026 | /* | ||
1027 | * send signal info to all the members of a group | ||
1028 | * - the caller must hold the RCU read lock at least | ||
1029 | */ | ||
1011 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | 1030 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1012 | { | 1031 | { |
1013 | unsigned long flags; | 1032 | unsigned long flags; |
@@ -1029,8 +1048,8 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) | |||
1029 | /* | 1048 | /* |
1030 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty | 1049 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty |
1031 | * control characters do (^C, ^Z etc) | 1050 | * control characters do (^C, ^Z etc) |
1051 | * - the caller must hold at least a read lock on tasklist_lock | ||
1032 | */ | 1052 | */ |
1033 | |||
1034 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) | 1053 | int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp) |
1035 | { | 1054 | { |
1036 | struct task_struct *p = NULL; | 1055 | struct task_struct *p = NULL; |
@@ -1086,6 +1105,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, | |||
1086 | { | 1105 | { |
1087 | int ret = -EINVAL; | 1106 | int ret = -EINVAL; |
1088 | struct task_struct *p; | 1107 | struct task_struct *p; |
1108 | const struct cred *pcred; | ||
1089 | 1109 | ||
1090 | if (!valid_signal(sig)) | 1110 | if (!valid_signal(sig)) |
1091 | return ret; | 1111 | return ret; |
@@ -1096,9 +1116,11 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, | |||
1096 | ret = -ESRCH; | 1116 | ret = -ESRCH; |
1097 | goto out_unlock; | 1117 | goto out_unlock; |
1098 | } | 1118 | } |
1099 | if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) | 1119 | pcred = __task_cred(p); |
1100 | && (euid != p->suid) && (euid != p->uid) | 1120 | if ((info == SEND_SIG_NOINFO || |
1101 | && (uid != p->suid) && (uid != p->uid)) { | 1121 | (!is_si_special(info) && SI_FROMUSER(info))) && |
1122 | euid != pcred->suid && euid != pcred->uid && | ||
1123 | uid != pcred->suid && uid != pcred->uid) { | ||
1102 | ret = -EPERM; | 1124 | ret = -EPERM; |
1103 | goto out_unlock; | 1125 | goto out_unlock; |
1104 | } | 1126 | } |
@@ -1369,10 +1391,9 @@ int do_notify_parent(struct task_struct *tsk, int sig) | |||
1369 | */ | 1391 | */ |
1370 | rcu_read_lock(); | 1392 | rcu_read_lock(); |
1371 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); | 1393 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); |
1394 | info.si_uid = __task_cred(tsk)->uid; | ||
1372 | rcu_read_unlock(); | 1395 | rcu_read_unlock(); |
1373 | 1396 | ||
1374 | info.si_uid = tsk->uid; | ||
1375 | |||
1376 | thread_group_cputime(tsk, &cputime); | 1397 | thread_group_cputime(tsk, &cputime); |
1377 | info.si_utime = cputime_to_jiffies(cputime.utime); | 1398 | info.si_utime = cputime_to_jiffies(cputime.utime); |
1378 | info.si_stime = cputime_to_jiffies(cputime.stime); | 1399 | info.si_stime = cputime_to_jiffies(cputime.stime); |
@@ -1440,10 +1461,9 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) | |||
1440 | */ | 1461 | */ |
1441 | rcu_read_lock(); | 1462 | rcu_read_lock(); |
1442 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); | 1463 | info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); |
1464 | info.si_uid = __task_cred(tsk)->uid; | ||
1443 | rcu_read_unlock(); | 1465 | rcu_read_unlock(); |
1444 | 1466 | ||
1445 | info.si_uid = tsk->uid; | ||
1446 | |||
1447 | info.si_utime = cputime_to_clock_t(tsk->utime); | 1467 | info.si_utime = cputime_to_clock_t(tsk->utime); |
1448 | info.si_stime = cputime_to_clock_t(tsk->stime); | 1468 | info.si_stime = cputime_to_clock_t(tsk->stime); |
1449 | 1469 | ||
@@ -1598,7 +1618,7 @@ void ptrace_notify(int exit_code) | |||
1598 | info.si_signo = SIGTRAP; | 1618 | info.si_signo = SIGTRAP; |
1599 | info.si_code = exit_code; | 1619 | info.si_code = exit_code; |
1600 | info.si_pid = task_pid_vnr(current); | 1620 | info.si_pid = task_pid_vnr(current); |
1601 | info.si_uid = current->uid; | 1621 | info.si_uid = current_uid(); |
1602 | 1622 | ||
1603 | /* Let the debugger run. */ | 1623 | /* Let the debugger run. */ |
1604 | spin_lock_irq(¤t->sighand->siglock); | 1624 | spin_lock_irq(¤t->sighand->siglock); |
@@ -1710,7 +1730,7 @@ static int ptrace_signal(int signr, siginfo_t *info, | |||
1710 | info->si_errno = 0; | 1730 | info->si_errno = 0; |
1711 | info->si_code = SI_USER; | 1731 | info->si_code = SI_USER; |
1712 | info->si_pid = task_pid_vnr(current->parent); | 1732 | info->si_pid = task_pid_vnr(current->parent); |
1713 | info->si_uid = current->parent->uid; | 1733 | info->si_uid = task_uid(current->parent); |
1714 | } | 1734 | } |
1715 | 1735 | ||
1716 | /* If the (new) signal is now blocked, requeue it. */ | 1736 | /* If the (new) signal is now blocked, requeue it. */ |
@@ -2211,7 +2231,7 @@ sys_kill(pid_t pid, int sig) | |||
2211 | info.si_errno = 0; | 2231 | info.si_errno = 0; |
2212 | info.si_code = SI_USER; | 2232 | info.si_code = SI_USER; |
2213 | info.si_pid = task_tgid_vnr(current); | 2233 | info.si_pid = task_tgid_vnr(current); |
2214 | info.si_uid = current->uid; | 2234 | info.si_uid = current_uid(); |
2215 | 2235 | ||
2216 | return kill_something_info(sig, &info, pid); | 2236 | return kill_something_info(sig, &info, pid); |
2217 | } | 2237 | } |
@@ -2228,7 +2248,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig) | |||
2228 | info.si_errno = 0; | 2248 | info.si_errno = 0; |
2229 | info.si_code = SI_TKILL; | 2249 | info.si_code = SI_TKILL; |
2230 | info.si_pid = task_tgid_vnr(current); | 2250 | info.si_pid = task_tgid_vnr(current); |
2231 | info.si_uid = current->uid; | 2251 | info.si_uid = current_uid(); |
2232 | 2252 | ||
2233 | rcu_read_lock(); | 2253 | rcu_read_lock(); |
2234 | p = find_task_by_vpid(pid); | 2254 | p = find_task_by_vpid(pid); |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c index 3953e4aed733..dc0b3be6b7d5 100644 --- a/kernel/softlockup.c +++ b/kernel/softlockup.c | |||
@@ -188,7 +188,7 @@ static void check_hung_task(struct task_struct *t, unsigned long now) | |||
188 | if ((long)(now - t->last_switch_timestamp) < | 188 | if ((long)(now - t->last_switch_timestamp) < |
189 | sysctl_hung_task_timeout_secs) | 189 | sysctl_hung_task_timeout_secs) |
190 | return; | 190 | return; |
191 | if (sysctl_hung_task_warnings < 0) | 191 | if (!sysctl_hung_task_warnings) |
192 | return; | 192 | return; |
193 | sysctl_hung_task_warnings--; | 193 | sysctl_hung_task_warnings--; |
194 | 194 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index 31deba8f7d16..ebe65c2c9873 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -112,12 +112,17 @@ EXPORT_SYMBOL(cad_pid); | |||
112 | 112 | ||
113 | void (*pm_power_off_prepare)(void); | 113 | void (*pm_power_off_prepare)(void); |
114 | 114 | ||
115 | /* | ||
116 | * set the priority of a task | ||
117 | * - the caller must hold the RCU read lock | ||
118 | */ | ||
115 | static int set_one_prio(struct task_struct *p, int niceval, int error) | 119 | static int set_one_prio(struct task_struct *p, int niceval, int error) |
116 | { | 120 | { |
121 | const struct cred *cred = current_cred(), *pcred = __task_cred(p); | ||
117 | int no_nice; | 122 | int no_nice; |
118 | 123 | ||
119 | if (p->uid != current->euid && | 124 | if (pcred->uid != cred->euid && |
120 | p->euid != current->euid && !capable(CAP_SYS_NICE)) { | 125 | pcred->euid != cred->euid && !capable(CAP_SYS_NICE)) { |
121 | error = -EPERM; | 126 | error = -EPERM; |
122 | goto out; | 127 | goto out; |
123 | } | 128 | } |
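set_one_prio() now reads both sides through accessors: current_cred() for the caller and __task_cred(p) for the target, the latter valid only inside an RCU read-side critical section, which the added comment makes the caller's responsibility. The permission core, as a sketch:

const struct cred *cred = current_cred();    /* caller's credentials */
const struct cred *pcred = __task_cred(p);   /* target's, RCU-protected */

if (pcred->uid != cred->euid &&
    pcred->euid != cred->euid && !capable(CAP_SYS_NICE))
        return -EPERM;  /* not the same owner and not privileged */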
@@ -141,6 +146,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) | |||
141 | { | 146 | { |
142 | struct task_struct *g, *p; | 147 | struct task_struct *g, *p; |
143 | struct user_struct *user; | 148 | struct user_struct *user; |
149 | const struct cred *cred = current_cred(); | ||
144 | int error = -EINVAL; | 150 | int error = -EINVAL; |
145 | struct pid *pgrp; | 151 | struct pid *pgrp; |
146 | 152 | ||
@@ -174,18 +180,18 @@ asmlinkage long sys_setpriority(int which, int who, int niceval) | |||
174 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 180 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
175 | break; | 181 | break; |
176 | case PRIO_USER: | 182 | case PRIO_USER: |
177 | user = current->user; | 183 | user = (struct user_struct *) cred->user; |
178 | if (!who) | 184 | if (!who) |
179 | who = current->uid; | 185 | who = cred->uid; |
180 | else | 186 | else if ((who != cred->uid) && |
181 | if ((who != current->uid) && !(user = find_user(who))) | 187 | !(user = find_user(who))) |
182 | goto out_unlock; /* No processes for this user */ | 188 | goto out_unlock; /* No processes for this user */ |
183 | 189 | ||
184 | do_each_thread(g, p) | 190 | do_each_thread(g, p) |
185 | if (p->uid == who) | 191 | if (__task_cred(p)->uid == who) |
186 | error = set_one_prio(p, niceval, error); | 192 | error = set_one_prio(p, niceval, error); |
187 | while_each_thread(g, p); | 193 | while_each_thread(g, p); |
188 | if (who != current->uid) | 194 | if (who != cred->uid) |
189 | free_uid(user); /* For find_user() */ | 195 | free_uid(user); /* For find_user() */ |
190 | break; | 196 | break; |
191 | } | 197 | } |
@@ -205,6 +211,7 @@ asmlinkage long sys_getpriority(int which, int who) | |||
205 | { | 211 | { |
206 | struct task_struct *g, *p; | 212 | struct task_struct *g, *p; |
207 | struct user_struct *user; | 213 | struct user_struct *user; |
214 | const struct cred *cred = current_cred(); | ||
208 | long niceval, retval = -ESRCH; | 215 | long niceval, retval = -ESRCH; |
209 | struct pid *pgrp; | 216 | struct pid *pgrp; |
210 | 217 | ||
@@ -236,21 +243,21 @@ asmlinkage long sys_getpriority(int which, int who) | |||
236 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); | 243 | } while_each_pid_thread(pgrp, PIDTYPE_PGID, p); |
237 | break; | 244 | break; |
238 | case PRIO_USER: | 245 | case PRIO_USER: |
239 | user = current->user; | 246 | user = (struct user_struct *) cred->user; |
240 | if (!who) | 247 | if (!who) |
241 | who = current->uid; | 248 | who = cred->uid; |
242 | else | 249 | else if ((who != cred->uid) && |
243 | if ((who != current->uid) && !(user = find_user(who))) | 250 | !(user = find_user(who))) |
244 | goto out_unlock; /* No processes for this user */ | 251 | goto out_unlock; /* No processes for this user */ |
245 | 252 | ||
246 | do_each_thread(g, p) | 253 | do_each_thread(g, p) |
247 | if (p->uid == who) { | 254 | if (__task_cred(p)->uid == who) { |
248 | niceval = 20 - task_nice(p); | 255 | niceval = 20 - task_nice(p); |
249 | if (niceval > retval) | 256 | if (niceval > retval) |
250 | retval = niceval; | 257 | retval = niceval; |
251 | } | 258 | } |
252 | while_each_thread(g, p); | 259 | while_each_thread(g, p); |
253 | if (who != current->uid) | 260 | if (who != cred->uid) |
254 | free_uid(user); /* for find_user() */ | 261 | free_uid(user); /* for find_user() */ |
255 | break; | 262 | break; |
256 | } | 263 | } |
@@ -472,46 +479,48 @@ void ctrl_alt_del(void) | |||
472 | */ | 479 | */ |
473 | asmlinkage long sys_setregid(gid_t rgid, gid_t egid) | 480 | asmlinkage long sys_setregid(gid_t rgid, gid_t egid) |
474 | { | 481 | { |
475 | int old_rgid = current->gid; | 482 | const struct cred *old; |
476 | int old_egid = current->egid; | 483 | struct cred *new; |
477 | int new_rgid = old_rgid; | ||
478 | int new_egid = old_egid; | ||
479 | int retval; | 484 | int retval; |
480 | 485 | ||
486 | new = prepare_creds(); | ||
487 | if (!new) | ||
488 | return -ENOMEM; | ||
489 | old = current_cred(); | ||
490 | |||
481 | retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); | 491 | retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); |
482 | if (retval) | 492 | if (retval) |
483 | return retval; | 493 | goto error; |
484 | 494 | ||
495 | retval = -EPERM; | ||
485 | if (rgid != (gid_t) -1) { | 496 | if (rgid != (gid_t) -1) { |
486 | if ((old_rgid == rgid) || | 497 | if (old->gid == rgid || |
487 | (current->egid==rgid) || | 498 | old->egid == rgid || |
488 | capable(CAP_SETGID)) | 499 | capable(CAP_SETGID)) |
489 | new_rgid = rgid; | 500 | new->gid = rgid; |
490 | else | 501 | else |
491 | return -EPERM; | 502 | goto error; |
492 | } | 503 | } |
493 | if (egid != (gid_t) -1) { | 504 | if (egid != (gid_t) -1) { |
494 | if ((old_rgid == egid) || | 505 | if (old->gid == egid || |
495 | (current->egid == egid) || | 506 | old->egid == egid || |
496 | (current->sgid == egid) || | 507 | old->sgid == egid || |
497 | capable(CAP_SETGID)) | 508 | capable(CAP_SETGID)) |
498 | new_egid = egid; | 509 | new->egid = egid; |
499 | else | 510 | else |
500 | return -EPERM; | 511 | goto error; |
501 | } | ||
502 | if (new_egid != old_egid) { | ||
503 | set_dumpable(current->mm, suid_dumpable); | ||
504 | smp_wmb(); | ||
505 | } | 512 | } |
513 | |||
506 | if (rgid != (gid_t) -1 || | 514 | if (rgid != (gid_t) -1 || |
507 | (egid != (gid_t) -1 && egid != old_rgid)) | 515 | (egid != (gid_t) -1 && egid != old->gid)) |
508 | current->sgid = new_egid; | 516 | new->sgid = new->egid; |
509 | current->fsgid = new_egid; | 517 | new->fsgid = new->egid; |
510 | current->egid = new_egid; | 518 | |
511 | current->gid = new_rgid; | 519 | return commit_creds(new); |
512 | key_fsgid_changed(current); | 520 | |
513 | proc_id_connector(current, PROC_EVENT_GID); | 521 | error: |
514 | return 0; | 522 | abort_creds(new); |
523 | return retval; | ||
515 | } | 524 | } |
516 | 525 | ||
517 | /* | 526 | /* |
@@ -521,56 +530,54 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid) | |||
521 | */ | 530 | */ |
522 | asmlinkage long sys_setgid(gid_t gid) | 531 | asmlinkage long sys_setgid(gid_t gid) |
523 | { | 532 | { |
524 | int old_egid = current->egid; | 533 | const struct cred *old; |
534 | struct cred *new; | ||
525 | int retval; | 535 | int retval; |
526 | 536 | ||
537 | new = prepare_creds(); | ||
538 | if (!new) | ||
539 | return -ENOMEM; | ||
540 | old = current_cred(); | ||
541 | |||
527 | retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); | 542 | retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); |
528 | if (retval) | 543 | if (retval) |
529 | return retval; | 544 | goto error; |
530 | 545 | ||
531 | if (capable(CAP_SETGID)) { | 546 | retval = -EPERM; |
532 | if (old_egid != gid) { | 547 | if (capable(CAP_SETGID)) |
533 | set_dumpable(current->mm, suid_dumpable); | 548 | new->gid = new->egid = new->sgid = new->fsgid = gid; |
534 | smp_wmb(); | 549 | else if (gid == old->gid || gid == old->sgid) |
535 | } | 550 | new->egid = new->fsgid = gid; |
536 | current->gid = current->egid = current->sgid = current->fsgid = gid; | ||
537 | } else if ((gid == current->gid) || (gid == current->sgid)) { | ||
538 | if (old_egid != gid) { | ||
539 | set_dumpable(current->mm, suid_dumpable); | ||
540 | smp_wmb(); | ||
541 | } | ||
542 | current->egid = current->fsgid = gid; | ||
543 | } | ||
544 | else | 551 | else |
545 | return -EPERM; | 552 | goto error; |
546 | 553 | ||
547 | key_fsgid_changed(current); | 554 | return commit_creds(new); |
548 | proc_id_connector(current, PROC_EVENT_GID); | 555 | |
549 | return 0; | 556 | error: |
557 | abort_creds(new); | ||
558 | return retval; | ||
550 | } | 559 | } |
551 | 560 | ||
552 | static int set_user(uid_t new_ruid, int dumpclear) | 561 | /* |
562 | * change the user struct in a credentials set to match the new UID | ||
563 | */ | ||
564 | static int set_user(struct cred *new) | ||
553 | { | 565 | { |
554 | struct user_struct *new_user; | 566 | struct user_struct *new_user; |
555 | 567 | ||
556 | new_user = alloc_uid(current->nsproxy->user_ns, new_ruid); | 568 | new_user = alloc_uid(current_user_ns(), new->uid); |
557 | if (!new_user) | 569 | if (!new_user) |
558 | return -EAGAIN; | 570 | return -EAGAIN; |
559 | 571 | ||
560 | if (atomic_read(&new_user->processes) >= | 572 | if (atomic_read(&new_user->processes) >= |
561 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && | 573 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && |
562 | new_user != current->nsproxy->user_ns->root_user) { | 574 | new_user != INIT_USER) { |
563 | free_uid(new_user); | 575 | free_uid(new_user); |
564 | return -EAGAIN; | 576 | return -EAGAIN; |
565 | } | 577 | } |
566 | 578 | ||
567 | switch_uid(new_user); | 579 | free_uid(new->user); |
568 | 580 | new->user = new_user; | |
569 | if (dumpclear) { | ||
570 | set_dumpable(current->mm, suid_dumpable); | ||
571 | smp_wmb(); | ||
572 | } | ||
573 | current->uid = new_ruid; | ||
574 | return 0; | 581 | return 0; |
575 | } | 582 | } |
576 | 583 | ||
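All of the sys.c setters above now share one copy-modify-commit lifecycle: prepare_creds() hands back a private writable copy of the current credentials, every mutation happens on that copy, and exactly one of commit_creds() (which publishes the copy and consumes it) or abort_creds() (which discards it) runs on each path. The old set_dumpable()/smp_wmb(), key_fsgid_changed()/key_fsuid_changed() and proc_id_connector() calls disappear from the call sites because that bookkeeping moves into commit_creds(). A hedged skeleton of the pattern (sys_setfoo and its policy check are illustrative, not a real syscall):

asmlinkage long sys_setfoo(uid_t uid)
{
        const struct cred *old;
        struct cred *new;
        int retval;

        new = prepare_creds();          /* private, writable copy */
        if (!new)
                return -ENOMEM;
        old = current_cred();

        retval = -EPERM;
        if (!capable(CAP_SETUID) && uid != old->uid && uid != old->suid)
                goto error;

        new->euid = new->fsuid = uid;   /* mutate only the copy */

        return commit_creds(new);       /* publish atomically, or... */

error:
        abort_creds(new);               /* ...throw the copy away */
        return retval;
}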
@@ -591,54 +598,56 @@ static int set_user(uid_t new_ruid, int dumpclear) | |||
591 | */ | 598 | */ |
592 | asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) | 599 | asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) |
593 | { | 600 | { |
594 | int old_ruid, old_euid, old_suid, new_ruid, new_euid; | 601 | const struct cred *old; |
602 | struct cred *new; | ||
595 | int retval; | 603 | int retval; |
596 | 604 | ||
605 | new = prepare_creds(); | ||
606 | if (!new) | ||
607 | return -ENOMEM; | ||
608 | old = current_cred(); | ||
609 | |||
597 | retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE); | 610 | retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE); |
598 | if (retval) | 611 | if (retval) |
599 | return retval; | 612 | goto error; |
600 | |||
601 | new_ruid = old_ruid = current->uid; | ||
602 | new_euid = old_euid = current->euid; | ||
603 | old_suid = current->suid; | ||
604 | 613 | ||
614 | retval = -EPERM; | ||
605 | if (ruid != (uid_t) -1) { | 615 | if (ruid != (uid_t) -1) { |
606 | new_ruid = ruid; | 616 | new->uid = ruid; |
607 | if ((old_ruid != ruid) && | 617 | if (old->uid != ruid && |
608 | (current->euid != ruid) && | 618 | old->euid != ruid && |
609 | !capable(CAP_SETUID)) | 619 | !capable(CAP_SETUID)) |
610 | return -EPERM; | 620 | goto error; |
611 | } | 621 | } |
612 | 622 | ||
613 | if (euid != (uid_t) -1) { | 623 | if (euid != (uid_t) -1) { |
614 | new_euid = euid; | 624 | new->euid = euid; |
615 | if ((old_ruid != euid) && | 625 | if (old->uid != euid && |
616 | (current->euid != euid) && | 626 | old->euid != euid && |
617 | (current->suid != euid) && | 627 | old->suid != euid && |
618 | !capable(CAP_SETUID)) | 628 | !capable(CAP_SETUID)) |
619 | return -EPERM; | 629 | goto error; |
620 | } | 630 | } |
621 | 631 | ||
622 | if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0) | 632 | retval = -EAGAIN; |
623 | return -EAGAIN; | 633 | if (new->uid != old->uid && set_user(new) < 0) |
634 | goto error; | ||
624 | 635 | ||
625 | if (new_euid != old_euid) { | ||
626 | set_dumpable(current->mm, suid_dumpable); | ||
627 | smp_wmb(); | ||
628 | } | ||
629 | current->fsuid = current->euid = new_euid; | ||
630 | if (ruid != (uid_t) -1 || | 636 | if (ruid != (uid_t) -1 || |
631 | (euid != (uid_t) -1 && euid != old_ruid)) | 637 | (euid != (uid_t) -1 && euid != old->uid)) |
632 | current->suid = current->euid; | 638 | new->suid = new->euid; |
633 | current->fsuid = current->euid; | 639 | new->fsuid = new->euid; |
634 | 640 | ||
635 | key_fsuid_changed(current); | 641 | retval = security_task_fix_setuid(new, old, LSM_SETID_RE); |
636 | proc_id_connector(current, PROC_EVENT_UID); | 642 | if (retval < 0) |
637 | 643 | goto error; | |
638 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE); | ||
639 | } | ||
640 | 644 | ||
645 | return commit_creds(new); | ||
641 | 646 | ||
647 | error: | ||
648 | abort_creds(new); | ||
649 | return retval; | ||
650 | } | ||
642 | 651 | ||
643 | /* | 652 | /* |
644 | * setuid() is implemented like SysV with SAVED_IDS | 653 | * setuid() is implemented like SysV with SAVED_IDS |
@@ -653,36 +662,41 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) | |||
653 | */ | 662 | */ |
654 | asmlinkage long sys_setuid(uid_t uid) | 663 | asmlinkage long sys_setuid(uid_t uid) |
655 | { | 664 | { |
656 | int old_euid = current->euid; | 665 | const struct cred *old; |
657 | int old_ruid, old_suid, new_suid; | 666 | struct cred *new; |
658 | int retval; | 667 | int retval; |
659 | 668 | ||
669 | new = prepare_creds(); | ||
670 | if (!new) | ||
671 | return -ENOMEM; | ||
672 | old = current_cred(); | ||
673 | |||
660 | retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); | 674 | retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); |
661 | if (retval) | 675 | if (retval) |
662 | return retval; | 676 | goto error; |
663 | 677 | ||
664 | old_ruid = current->uid; | 678 | retval = -EPERM; |
665 | old_suid = current->suid; | ||
666 | new_suid = old_suid; | ||
667 | |||
668 | if (capable(CAP_SETUID)) { | 679 | if (capable(CAP_SETUID)) { |
669 | if (uid != old_ruid && set_user(uid, old_euid != uid) < 0) | 680 | new->suid = new->uid = uid; |
670 | return -EAGAIN; | 681 | if (uid != old->uid && set_user(new) < 0) { |
671 | new_suid = uid; | 682 | retval = -EAGAIN; |
672 | } else if ((uid != current->uid) && (uid != new_suid)) | 683 | goto error; |
673 | return -EPERM; | 684 | } |
674 | 685 | } else if (uid != old->uid && uid != new->suid) { | |
675 | if (old_euid != uid) { | 686 | goto error; |
676 | set_dumpable(current->mm, suid_dumpable); | ||
677 | smp_wmb(); | ||
678 | } | 687 | } |
679 | current->fsuid = current->euid = uid; | ||
680 | current->suid = new_suid; | ||
681 | 688 | ||
682 | key_fsuid_changed(current); | 689 | new->fsuid = new->euid = uid; |
683 | proc_id_connector(current, PROC_EVENT_UID); | 690 | |
691 | retval = security_task_fix_setuid(new, old, LSM_SETID_ID); | ||
692 | if (retval < 0) | ||
693 | goto error; | ||
684 | 694 | ||
685 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID); | 695 | return commit_creds(new); |
696 | |||
697 | error: | ||
698 | abort_creds(new); | ||
699 | return retval; | ||
686 | } | 700 | } |
687 | 701 | ||
688 | 702 | ||
@@ -692,54 +706,63 @@ asmlinkage long sys_setuid(uid_t uid) | |||
692 | */ | 706 | */ |
693 | asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) | 707 | asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) |
694 | { | 708 | { |
695 | int old_ruid = current->uid; | 709 | const struct cred *old; |
696 | int old_euid = current->euid; | 710 | struct cred *new; |
697 | int old_suid = current->suid; | ||
698 | int retval; | 711 | int retval; |
699 | 712 | ||
713 | new = prepare_creds(); | ||
714 | if (!new) | ||
715 | return -ENOMEM; | ||
716 | |||
700 | retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); | 717 | retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); |
701 | if (retval) | 718 | if (retval) |
702 | return retval; | 719 | goto error; |
720 | old = current_cred(); | ||
703 | 721 | ||
722 | retval = -EPERM; | ||
704 | if (!capable(CAP_SETUID)) { | 723 | if (!capable(CAP_SETUID)) { |
705 | if ((ruid != (uid_t) -1) && (ruid != current->uid) && | 724 | if (ruid != (uid_t) -1 && ruid != old->uid && |
706 | (ruid != current->euid) && (ruid != current->suid)) | 725 | ruid != old->euid && ruid != old->suid) |
707 | return -EPERM; | 726 | goto error; |
708 | if ((euid != (uid_t) -1) && (euid != current->uid) && | 727 | if (euid != (uid_t) -1 && euid != old->uid && |
709 | (euid != current->euid) && (euid != current->suid)) | 728 | euid != old->euid && euid != old->suid) |
710 | return -EPERM; | 729 | goto error; |
711 | if ((suid != (uid_t) -1) && (suid != current->uid) && | 730 | if (suid != (uid_t) -1 && suid != old->uid && |
712 | (suid != current->euid) && (suid != current->suid)) | 731 | suid != old->euid && suid != old->suid) |
713 | return -EPERM; | 732 | goto error; |
714 | } | 733 | } |
734 | |||
735 | retval = -EAGAIN; | ||
715 | if (ruid != (uid_t) -1) { | 736 | if (ruid != (uid_t) -1) { |
716 | if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0) | 737 | new->uid = ruid; |
717 | return -EAGAIN; | 738 | if (ruid != old->uid && set_user(new) < 0) |
739 | goto error; | ||
718 | } | 740 | } |
719 | if (euid != (uid_t) -1) { | 741 | if (euid != (uid_t) -1) |
720 | if (euid != current->euid) { | 742 | new->euid = euid; |
721 | set_dumpable(current->mm, suid_dumpable); | ||
722 | smp_wmb(); | ||
723 | } | ||
724 | current->euid = euid; | ||
725 | } | ||
726 | current->fsuid = current->euid; | ||
727 | if (suid != (uid_t) -1) | 743 | if (suid != (uid_t) -1) |
728 | current->suid = suid; | 744 | new->suid = suid; |
745 | new->fsuid = new->euid; | ||
746 | |||
747 | retval = security_task_fix_setuid(new, old, LSM_SETID_RES); | ||
748 | if (retval < 0) | ||
749 | goto error; | ||
729 | 750 | ||
730 | key_fsuid_changed(current); | 751 | return commit_creds(new); |
731 | proc_id_connector(current, PROC_EVENT_UID); | ||
732 | 752 | ||
733 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES); | 753 | error: |
754 | abort_creds(new); | ||
755 | return retval; | ||
734 | } | 756 | } |
735 | 757 | ||
736 | asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) | 758 | asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) |
737 | { | 759 | { |
760 | const struct cred *cred = current_cred(); | ||
738 | int retval; | 761 | int retval; |
739 | 762 | ||
740 | if (!(retval = put_user(current->uid, ruid)) && | 763 | if (!(retval = put_user(cred->uid, ruid)) && |
741 | !(retval = put_user(current->euid, euid))) | 764 | !(retval = put_user(cred->euid, euid))) |
742 | retval = put_user(current->suid, suid); | 765 | retval = put_user(cred->suid, suid); |
743 | 766 | ||
744 | return retval; | 767 | return retval; |
745 | } | 768 | } |
@@ -749,48 +772,55 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us | |||
749 | */ | 772 | */ |
750 | asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) | 773 | asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) |
751 | { | 774 | { |
775 | const struct cred *old; | ||
776 | struct cred *new; | ||
752 | int retval; | 777 | int retval; |
753 | 778 | ||
779 | new = prepare_creds(); | ||
780 | if (!new) | ||
781 | return -ENOMEM; | ||
782 | old = current_cred(); | ||
783 | |||
754 | retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); | 784 | retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); |
755 | if (retval) | 785 | if (retval) |
756 | return retval; | 786 | goto error; |
757 | 787 | ||
788 | retval = -EPERM; | ||
758 | if (!capable(CAP_SETGID)) { | 789 | if (!capable(CAP_SETGID)) { |
759 | if ((rgid != (gid_t) -1) && (rgid != current->gid) && | 790 | if (rgid != (gid_t) -1 && rgid != old->gid && |
760 | (rgid != current->egid) && (rgid != current->sgid)) | 791 | rgid != old->egid && rgid != old->sgid) |
761 | return -EPERM; | 792 | goto error; |
762 | if ((egid != (gid_t) -1) && (egid != current->gid) && | 793 | if (egid != (gid_t) -1 && egid != old->gid && |
763 | (egid != current->egid) && (egid != current->sgid)) | 794 | egid != old->egid && egid != old->sgid) |
764 | return -EPERM; | 795 | goto error; |
765 | if ((sgid != (gid_t) -1) && (sgid != current->gid) && | 796 | if (sgid != (gid_t) -1 && sgid != old->gid && |
766 | (sgid != current->egid) && (sgid != current->sgid)) | 797 | sgid != old->egid && sgid != old->sgid) |
767 | return -EPERM; | 798 | goto error; |
768 | } | 799 | } |
769 | if (egid != (gid_t) -1) { | 800 | |
770 | if (egid != current->egid) { | ||
771 | set_dumpable(current->mm, suid_dumpable); | ||
772 | smp_wmb(); | ||
773 | } | ||
774 | current->egid = egid; | ||
775 | } | ||
776 | current->fsgid = current->egid; | ||
777 | if (rgid != (gid_t) -1) | 801 | if (rgid != (gid_t) -1) |
778 | current->gid = rgid; | 802 | new->gid = rgid; |
803 | if (egid != (gid_t) -1) | ||
804 | new->egid = egid; | ||
779 | if (sgid != (gid_t) -1) | 805 | if (sgid != (gid_t) -1) |
780 | current->sgid = sgid; | 806 | new->sgid = sgid; |
807 | new->fsgid = new->egid; | ||
781 | 808 | ||
782 | key_fsgid_changed(current); | 809 | return commit_creds(new); |
783 | proc_id_connector(current, PROC_EVENT_GID); | 810 | |
784 | return 0; | 811 | error: |
812 | abort_creds(new); | ||
813 | return retval; | ||
785 | } | 814 | } |
786 | 815 | ||
787 | asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) | 816 | asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) |
788 | { | 817 | { |
818 | const struct cred *cred = current_cred(); | ||
789 | int retval; | 819 | int retval; |
790 | 820 | ||
791 | if (!(retval = put_user(current->gid, rgid)) && | 821 | if (!(retval = put_user(cred->gid, rgid)) && |
792 | !(retval = put_user(current->egid, egid))) | 822 | !(retval = put_user(cred->egid, egid))) |
793 | retval = put_user(current->sgid, sgid); | 823 | retval = put_user(cred->sgid, sgid); |
794 | 824 | ||
795 | return retval; | 825 | return retval; |
796 | } | 826 | } |
@@ -804,27 +834,35 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us | |||
804 | */ | 834 | */ |
805 | asmlinkage long sys_setfsuid(uid_t uid) | 835 | asmlinkage long sys_setfsuid(uid_t uid) |
806 | { | 836 | { |
807 | int old_fsuid; | 837 | const struct cred *old; |
838 | struct cred *new; | ||
839 | uid_t old_fsuid; | ||
808 | 840 | ||
809 | old_fsuid = current->fsuid; | 841 | new = prepare_creds(); |
810 | if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS)) | 842 | if (!new) |
811 | return old_fsuid; | 843 | return current_fsuid(); |
844 | old = current_cred(); | ||
845 | old_fsuid = old->fsuid; | ||
812 | 846 | ||
813 | if (uid == current->uid || uid == current->euid || | 847 | if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0) |
814 | uid == current->suid || uid == current->fsuid || | 848 | goto error; |
849 | |||
850 | if (uid == old->uid || uid == old->euid || | ||
851 | uid == old->suid || uid == old->fsuid || | ||
815 | capable(CAP_SETUID)) { | 852 | capable(CAP_SETUID)) { |
816 | if (uid != old_fsuid) { | 853 | if (uid != old_fsuid) { |
817 | set_dumpable(current->mm, suid_dumpable); | 854 | new->fsuid = uid; |
818 | smp_wmb(); | 855 | if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) |
856 | goto change_okay; | ||
819 | } | 857 | } |
820 | current->fsuid = uid; | ||
821 | } | 858 | } |
822 | 859 | ||
823 | key_fsuid_changed(current); | 860 | error: |
824 | proc_id_connector(current, PROC_EVENT_UID); | 861 | abort_creds(new); |
825 | 862 | return old_fsuid; | |
826 | security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); | ||
827 | 863 | ||
864 | change_okay: | ||
865 | commit_creds(new); | ||
828 | return old_fsuid; | 866 | return old_fsuid; |
829 | } | 867 | } |
830 | 868 | ||
@@ -833,23 +871,34 @@ asmlinkage long sys_setfsuid(uid_t uid) | |||
833 | */ | 871 | */ |
834 | asmlinkage long sys_setfsgid(gid_t gid) | 872 | asmlinkage long sys_setfsgid(gid_t gid) |
835 | { | 873 | { |
836 | int old_fsgid; | 874 | const struct cred *old; |
875 | struct cred *new; | ||
876 | gid_t old_fsgid; | ||
877 | |||
878 | new = prepare_creds(); | ||
879 | if (!new) | ||
880 | return current_fsgid(); | ||
881 | old = current_cred(); | ||
882 | old_fsgid = old->fsgid; | ||
837 | 883 | ||
838 | old_fsgid = current->fsgid; | ||
839 | if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS)) | 884 | if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS)) |
840 | return old_fsgid; | 885 | goto error; |
841 | 886 | ||
842 | if (gid == current->gid || gid == current->egid || | 887 | if (gid == old->gid || gid == old->egid || |
843 | gid == current->sgid || gid == current->fsgid || | 888 | gid == old->sgid || gid == old->fsgid || |
844 | capable(CAP_SETGID)) { | 889 | capable(CAP_SETGID)) { |
845 | if (gid != old_fsgid) { | 890 | if (gid != old_fsgid) { |
846 | set_dumpable(current->mm, suid_dumpable); | 891 | new->fsgid = gid; |
847 | smp_wmb(); | 892 | goto change_okay; |
848 | } | 893 | } |
849 | current->fsgid = gid; | ||
850 | key_fsgid_changed(current); | ||
851 | proc_id_connector(current, PROC_EVENT_GID); | ||
852 | } | 894 | } |
895 | |||
896 | error: | ||
897 | abort_creds(new); | ||
898 | return old_fsgid; | ||
899 | |||
900 | change_okay: | ||
901 | commit_creds(new); | ||
853 | return old_fsgid; | 902 | return old_fsgid; |
854 | } | 903 | } |
855 | 904 | ||
@@ -1118,7 +1167,7 @@ EXPORT_SYMBOL(groups_free); | |||
1118 | 1167 | ||
1119 | /* export the group_info to a user-space array */ | 1168 | /* export the group_info to a user-space array */ |
1120 | static int groups_to_user(gid_t __user *grouplist, | 1169 | static int groups_to_user(gid_t __user *grouplist, |
1121 | struct group_info *group_info) | 1170 | const struct group_info *group_info) |
1122 | { | 1171 | { |
1123 | int i; | 1172 | int i; |
1124 | unsigned int count = group_info->ngroups; | 1173 | unsigned int count = group_info->ngroups; |
@@ -1186,7 +1235,7 @@ static void groups_sort(struct group_info *group_info) | |||
1186 | } | 1235 | } |
1187 | 1236 | ||
1188 | /* a simple bsearch */ | 1237 | /* a simple bsearch */ |
1189 | int groups_search(struct group_info *group_info, gid_t grp) | 1238 | int groups_search(const struct group_info *group_info, gid_t grp) |
1190 | { | 1239 | { |
1191 | unsigned int left, right; | 1240 | unsigned int left, right; |
1192 | 1241 | ||
@@ -1208,51 +1257,74 @@ int groups_search(struct group_info *group_info, gid_t grp) | |||
1208 | return 0; | 1257 | return 0; |
1209 | } | 1258 | } |
1210 | 1259 | ||
1211 | /* validate and set current->group_info */ | 1260 | /** |
1212 | int set_current_groups(struct group_info *group_info) | 1261 | * set_groups - Change a group subscription in a set of credentials |
1262 | * @new: The newly prepared set of credentials to alter | ||
1263 | * @group_info: The group list to install | ||
1264 | * | ||
1265 | * Validate a group subscription and, if valid, insert it into a set | ||
1266 | * of credentials. | ||
1267 | */ | ||
1268 | int set_groups(struct cred *new, struct group_info *group_info) | ||
1213 | { | 1269 | { |
1214 | int retval; | 1270 | int retval; |
1215 | struct group_info *old_info; | ||
1216 | 1271 | ||
1217 | retval = security_task_setgroups(group_info); | 1272 | retval = security_task_setgroups(group_info); |
1218 | if (retval) | 1273 | if (retval) |
1219 | return retval; | 1274 | return retval; |
1220 | 1275 | ||
1276 | put_group_info(new->group_info); | ||
1221 | groups_sort(group_info); | 1277 | groups_sort(group_info); |
1222 | get_group_info(group_info); | 1278 | get_group_info(group_info); |
1279 | new->group_info = group_info; | ||
1280 | return 0; | ||
1281 | } | ||
1282 | |||
1283 | EXPORT_SYMBOL(set_groups); | ||
1223 | 1284 | ||
1224 | task_lock(current); | 1285 | /** |
1225 | old_info = current->group_info; | 1286 | * set_current_groups - Change current's group subscription |
1226 | current->group_info = group_info; | 1287 | * @group_info: The group list to impose |
1227 | task_unlock(current); | 1288 | * |
1289 | * Validate a group subscription and, if valid, impose it upon current's task | ||
1290 | * security record. | ||
1291 | */ | ||
1292 | int set_current_groups(struct group_info *group_info) | ||
1293 | { | ||
1294 | struct cred *new; | ||
1295 | int ret; | ||
1228 | 1296 | ||
1229 | put_group_info(old_info); | 1297 | new = prepare_creds(); |
1298 | if (!new) | ||
1299 | return -ENOMEM; | ||
1230 | 1300 | ||
1231 | return 0; | 1301 | ret = set_groups(new, group_info); |
1302 | if (ret < 0) { | ||
1303 | abort_creds(new); | ||
1304 | return ret; | ||
1305 | } | ||
1306 | |||
1307 | return commit_creds(new); | ||
1232 | } | 1308 | } |
1233 | 1309 | ||
1234 | EXPORT_SYMBOL(set_current_groups); | 1310 | EXPORT_SYMBOL(set_current_groups); |
1235 | 1311 | ||
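A hedged usage sketch of the new set_groups() helper on a prepared credential set; groups_alloc(), GROUP_AT() and put_group_info() are the pre-existing group_info primitives:

        /* Sketch: install a single supplementary group via set_groups(). */
        static int example_set_single_group(gid_t gid)
        {
                struct group_info *gi;
                struct cred *new;
                int ret;

                gi = groups_alloc(1);
                if (!gi)
                        return -ENOMEM;
                GROUP_AT(gi, 0) = gid;

                new = prepare_creds();
                if (!new) {
                        put_group_info(gi);
                        return -ENOMEM;
                }

                ret = set_groups(new, gi);      /* validates, sorts, takes a ref */
                put_group_info(gi);             /* drop our local reference */
                if (ret < 0) {
                        abort_creds(new);
                        return ret;
                }
                return commit_creds(new);
        }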
1236 | asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) | 1312 | asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) |
1237 | { | 1313 | { |
1238 | int i = 0; | 1314 | const struct cred *cred = current_cred(); |
1239 | 1315 | int i; | |
1240 | /* | ||
1241 | * SMP: Nobody else can change our grouplist. Thus we are | ||
1242 | * safe. | ||
1243 | */ | ||
1244 | 1316 | ||
1245 | if (gidsetsize < 0) | 1317 | if (gidsetsize < 0) |
1246 | return -EINVAL; | 1318 | return -EINVAL; |
1247 | 1319 | ||
1248 | /* no need to grab task_lock here; it cannot change */ | 1320 | /* no need to grab task_lock here; it cannot change */ |
1249 | i = current->group_info->ngroups; | 1321 | i = cred->group_info->ngroups; |
1250 | if (gidsetsize) { | 1322 | if (gidsetsize) { |
1251 | if (i > gidsetsize) { | 1323 | if (i > gidsetsize) { |
1252 | i = -EINVAL; | 1324 | i = -EINVAL; |
1253 | goto out; | 1325 | goto out; |
1254 | } | 1326 | } |
1255 | if (groups_to_user(grouplist, current->group_info)) { | 1327 | if (groups_to_user(grouplist, cred->group_info)) { |
1256 | i = -EFAULT; | 1328 | i = -EFAULT; |
1257 | goto out; | 1329 | goto out; |
1258 | } | 1330 | } |
@@ -1296,9 +1368,11 @@ asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) | |||
1296 | */ | 1368 | */ |
1297 | int in_group_p(gid_t grp) | 1369 | int in_group_p(gid_t grp) |
1298 | { | 1370 | { |
1371 | const struct cred *cred = current_cred(); | ||
1299 | int retval = 1; | 1372 | int retval = 1; |
1300 | if (grp != current->fsgid) | 1373 | |
1301 | retval = groups_search(current->group_info, grp); | 1374 | if (grp != cred->fsgid) |
1375 | retval = groups_search(cred->group_info, grp); | ||
1302 | return retval; | 1376 | return retval; |
1303 | } | 1377 | } |
1304 | 1378 | ||
@@ -1306,9 +1380,11 @@ EXPORT_SYMBOL(in_group_p); | |||
1306 | 1380 | ||
1307 | int in_egroup_p(gid_t grp) | 1381 | int in_egroup_p(gid_t grp) |
1308 | { | 1382 | { |
1383 | const struct cred *cred = current_cred(); | ||
1309 | int retval = 1; | 1384 | int retval = 1; |
1310 | if (grp != current->egid) | 1385 | |
1311 | retval = groups_search(current->group_info, grp); | 1386 | if (grp != cred->egid) |
1387 | retval = groups_search(cred->group_info, grp); | ||
1312 | return retval; | 1388 | return retval; |
1313 | } | 1389 | } |
1314 | 1390 | ||
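groups_search() is described only as "a simple bsearch"; a sketch consistent with the sorted list that set_groups() guarantees via groups_sort() (GROUP_AT() indexes the group list):

        /* Binary search over the sorted supplementary group list. */
        static int example_groups_search(const struct group_info *gi, gid_t grp)
        {
                unsigned int left = 0, right = gi->ngroups;

                while (left < right) {
                        unsigned int mid = (left + right) / 2;

                        if (grp > GROUP_AT(gi, mid))
                                left = mid + 1;
                        else if (grp < GROUP_AT(gi, mid))
                                right = mid;
                        else
                                return 1;       /* member */
                }
                return 0;                       /* not a member */
        }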
@@ -1624,50 +1700,56 @@ asmlinkage long sys_umask(int mask) | |||
1624 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | 1700 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, |
1625 | unsigned long arg4, unsigned long arg5) | 1701 | unsigned long arg4, unsigned long arg5) |
1626 | { | 1702 | { |
1627 | long error = 0; | 1703 | struct task_struct *me = current; |
1704 | unsigned char comm[sizeof(me->comm)]; | ||
1705 | long error; | ||
1628 | 1706 | ||
1629 | if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error)) | 1707 | error = security_task_prctl(option, arg2, arg3, arg4, arg5); |
1708 | if (error != -ENOSYS) | ||
1630 | return error; | 1709 | return error; |
1631 | 1710 | ||
1711 | error = 0; | ||
1632 | switch (option) { | 1712 | switch (option) { |
1633 | case PR_SET_PDEATHSIG: | 1713 | case PR_SET_PDEATHSIG: |
1634 | if (!valid_signal(arg2)) { | 1714 | if (!valid_signal(arg2)) { |
1635 | error = -EINVAL; | 1715 | error = -EINVAL; |
1636 | break; | 1716 | break; |
1637 | } | 1717 | } |
1638 | current->pdeath_signal = arg2; | 1718 | me->pdeath_signal = arg2; |
1719 | error = 0; | ||
1639 | break; | 1720 | break; |
1640 | case PR_GET_PDEATHSIG: | 1721 | case PR_GET_PDEATHSIG: |
1641 | error = put_user(current->pdeath_signal, (int __user *)arg2); | 1722 | error = put_user(me->pdeath_signal, (int __user *)arg2); |
1642 | break; | 1723 | break; |
1643 | case PR_GET_DUMPABLE: | 1724 | case PR_GET_DUMPABLE: |
1644 | error = get_dumpable(current->mm); | 1725 | error = get_dumpable(me->mm); |
1645 | break; | 1726 | break; |
1646 | case PR_SET_DUMPABLE: | 1727 | case PR_SET_DUMPABLE: |
1647 | if (arg2 < 0 || arg2 > 1) { | 1728 | if (arg2 < 0 || arg2 > 1) { |
1648 | error = -EINVAL; | 1729 | error = -EINVAL; |
1649 | break; | 1730 | break; |
1650 | } | 1731 | } |
1651 | set_dumpable(current->mm, arg2); | 1732 | set_dumpable(me->mm, arg2); |
1733 | error = 0; | ||
1652 | break; | 1734 | break; |
1653 | 1735 | ||
1654 | case PR_SET_UNALIGN: | 1736 | case PR_SET_UNALIGN: |
1655 | error = SET_UNALIGN_CTL(current, arg2); | 1737 | error = SET_UNALIGN_CTL(me, arg2); |
1656 | break; | 1738 | break; |
1657 | case PR_GET_UNALIGN: | 1739 | case PR_GET_UNALIGN: |
1658 | error = GET_UNALIGN_CTL(current, arg2); | 1740 | error = GET_UNALIGN_CTL(me, arg2); |
1659 | break; | 1741 | break; |
1660 | case PR_SET_FPEMU: | 1742 | case PR_SET_FPEMU: |
1661 | error = SET_FPEMU_CTL(current, arg2); | 1743 | error = SET_FPEMU_CTL(me, arg2); |
1662 | break; | 1744 | break; |
1663 | case PR_GET_FPEMU: | 1745 | case PR_GET_FPEMU: |
1664 | error = GET_FPEMU_CTL(current, arg2); | 1746 | error = GET_FPEMU_CTL(me, arg2); |
1665 | break; | 1747 | break; |
1666 | case PR_SET_FPEXC: | 1748 | case PR_SET_FPEXC: |
1667 | error = SET_FPEXC_CTL(current, arg2); | 1749 | error = SET_FPEXC_CTL(me, arg2); |
1668 | break; | 1750 | break; |
1669 | case PR_GET_FPEXC: | 1751 | case PR_GET_FPEXC: |
1670 | error = GET_FPEXC_CTL(current, arg2); | 1752 | error = GET_FPEXC_CTL(me, arg2); |
1671 | break; | 1753 | break; |
1672 | case PR_GET_TIMING: | 1754 | case PR_GET_TIMING: |
1673 | error = PR_TIMING_STATISTICAL; | 1755 | error = PR_TIMING_STATISTICAL; |
@@ -1675,33 +1757,28 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
1675 | case PR_SET_TIMING: | 1757 | case PR_SET_TIMING: |
1676 | if (arg2 != PR_TIMING_STATISTICAL) | 1758 | if (arg2 != PR_TIMING_STATISTICAL) |
1677 | error = -EINVAL; | 1759 | error = -EINVAL; |
1760 | else | ||
1761 | error = 0; | ||
1678 | break; | 1762 | break; |
1679 | 1763 | ||
1680 | case PR_SET_NAME: { | 1764 | case PR_SET_NAME: |
1681 | struct task_struct *me = current; | 1765 | comm[sizeof(me->comm)-1] = 0; |
1682 | unsigned char ncomm[sizeof(me->comm)]; | 1766 | if (strncpy_from_user(comm, (char __user *)arg2, |
1683 | 1767 | sizeof(me->comm) - 1) < 0) | |
1684 | ncomm[sizeof(me->comm)-1] = 0; | ||
1685 | if (strncpy_from_user(ncomm, (char __user *)arg2, | ||
1686 | sizeof(me->comm)-1) < 0) | ||
1687 | return -EFAULT; | 1768 | return -EFAULT; |
1688 | set_task_comm(me, ncomm); | 1769 | set_task_comm(me, comm); |
1689 | return 0; | 1770 | return 0; |
1690 | } | 1771 | case PR_GET_NAME: |
1691 | case PR_GET_NAME: { | 1772 | get_task_comm(comm, me); |
1692 | struct task_struct *me = current; | 1773 | if (copy_to_user((char __user *)arg2, comm, |
1693 | unsigned char tcomm[sizeof(me->comm)]; | 1774 | sizeof(comm))) |
1694 | |||
1695 | get_task_comm(tcomm, me); | ||
1696 | if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm))) | ||
1697 | return -EFAULT; | 1775 | return -EFAULT; |
1698 | return 0; | 1776 | return 0; |
1699 | } | ||
1700 | case PR_GET_ENDIAN: | 1777 | case PR_GET_ENDIAN: |
1701 | error = GET_ENDIAN(current, arg2); | 1778 | error = GET_ENDIAN(me, arg2); |
1702 | break; | 1779 | break; |
1703 | case PR_SET_ENDIAN: | 1780 | case PR_SET_ENDIAN: |
1704 | error = SET_ENDIAN(current, arg2); | 1781 | error = SET_ENDIAN(me, arg2); |
1705 | break; | 1782 | break; |
1706 | 1783 | ||
1707 | case PR_GET_SECCOMP: | 1784 | case PR_GET_SECCOMP: |
@@ -1725,6 +1802,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, | |||
1725 | current->default_timer_slack_ns; | 1802 | current->default_timer_slack_ns; |
1726 | else | 1803 | else |
1727 | current->timer_slack_ns = arg2; | 1804 | current->timer_slack_ns = arg2; |
1805 | error = 0; | ||
1728 | break; | 1806 | break; |
1729 | default: | 1807 | default: |
1730 | error = -EINVAL; | 1808 | error = -EINVAL; |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 4e2ac0aec9b0..ff6d45c7626f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -501,6 +501,26 @@ static struct ctl_table kern_table[] = { | |||
501 | .proc_handler = &ftrace_enable_sysctl, | 501 | .proc_handler = &ftrace_enable_sysctl, |
502 | }, | 502 | }, |
503 | #endif | 503 | #endif |
504 | #ifdef CONFIG_STACK_TRACER | ||
505 | { | ||
506 | .ctl_name = CTL_UNNUMBERED, | ||
507 | .procname = "stack_tracer_enabled", | ||
508 | .data = &stack_tracer_enabled, | ||
509 | .maxlen = sizeof(int), | ||
510 | .mode = 0644, | ||
511 | .proc_handler = &stack_trace_sysctl, | ||
512 | }, | ||
513 | #endif | ||
514 | #ifdef CONFIG_TRACING | ||
515 | { | ||
516 | .ctl_name = CTL_UNNUMBERED, | ||
517 | .procname = "ftrace_dump_on_oops", | ||
518 | .data = &ftrace_dump_on_oops, | ||
519 | .maxlen = sizeof(int), | ||
520 | .mode = 0644, | ||
521 | .proc_handler = &proc_dointvec, | ||
522 | }, | ||
523 | #endif | ||
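Once these entries are registered, both knobs appear under /proc/sys/kernel and can be flipped at run time, e.g.:

        echo 1 > /proc/sys/kernel/stack_tracer_enabled
        echo 1 > /proc/sys/kernel/ftrace_dump_on_oops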
504 | #ifdef CONFIG_MODULES | 524 | #ifdef CONFIG_MODULES |
505 | { | 525 | { |
506 | .ctl_name = KERN_MODPROBE, | 526 | .ctl_name = KERN_MODPROBE, |
@@ -1665,7 +1685,7 @@ out: | |||
1665 | 1685 | ||
1666 | static int test_perm(int mode, int op) | 1686 | static int test_perm(int mode, int op) |
1667 | { | 1687 | { |
1668 | if (!current->euid) | 1688 | if (!current_euid()) |
1669 | mode >>= 6; | 1689 | mode >>= 6; |
1670 | else if (in_egroup_p(0)) | 1690 | else if (in_egroup_p(0)) |
1671 | mode >>= 3; | 1691 | mode >>= 3; |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index e7acfb482a68..fa05e88aa76f 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -518,6 +518,28 @@ void update_wall_time(void) | |||
518 | /* correct the clock when NTP error is too big */ | 518 | /* correct the clock when NTP error is too big */ |
519 | clocksource_adjust(offset); | 519 | clocksource_adjust(offset); |
520 | 520 | ||
521 | /* | ||
522 | * Since in the loop above, we accumulate any amount of time | ||
523 | * in xtime_nsec over a second into xtime.tv_sec, its possible for | ||
524 | * xtime_nsec to be fairly small after the loop. Further, if we're | ||
525 | * slightly speeding the clocksource up in clocksource_adjust(), | ||
526 | * its possible the required corrective factor to xtime_nsec could | ||
527 | * cause it to underflow. | ||
528 | * | ||
529 | * Now, we cannot simply roll the accumulated second back, since | ||
530 | * the NTP subsystem has been notified via second_overflow. So | ||
531 | * instead we push xtime_nsec forward by the amount we underflowed, | ||
532 | * and add that amount into the error. | ||
533 | * | ||
534 | * We'll correct this error next time through this function, when | ||
535 | * xtime_nsec is not as small. | ||
536 | */ | ||
537 | if (unlikely((s64)clock->xtime_nsec < 0)) { | ||
538 | s64 neg = -(s64)clock->xtime_nsec; | ||
539 | clock->xtime_nsec = 0; | ||
540 | clock->error += neg << (NTP_SCALE_SHIFT - clock->shift); | ||
541 | } | ||
542 | |||
521 | /* store full nanoseconds into xtime after rounding it up and | 543 | /* store full nanoseconds into xtime after rounding it up and |
522 | * add the remainder to the error difference. | 544 | * add the remainder to the error difference. |
523 | */ | 545 | */ |
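A worked instance of the underflow correction described in the comment above, with illustrative values:

        /* Suppose clocksource_adjust() left xtime_nsec at -5. */
        s64 neg = 5;                            /* amount underflowed */
        clock->xtime_nsec = 0;                  /* push forward to zero */
        clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
        /* the 5 ns debt is repaid through normal NTP error correction */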
diff --git a/kernel/timer.c b/kernel/timer.c index dbd50fabe4c7..566257d1dc10 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1192,25 +1192,25 @@ asmlinkage long sys_getppid(void) | |||
1192 | asmlinkage long sys_getuid(void) | 1192 | asmlinkage long sys_getuid(void) |
1193 | { | 1193 | { |
1194 | /* Only we change this so SMP safe */ | 1194 | /* Only we change this so SMP safe */ |
1195 | return current->uid; | 1195 | return current_uid(); |
1196 | } | 1196 | } |
1197 | 1197 | ||
1198 | asmlinkage long sys_geteuid(void) | 1198 | asmlinkage long sys_geteuid(void) |
1199 | { | 1199 | { |
1200 | /* Only we change this so SMP safe */ | 1200 | /* Only we change this so SMP safe */ |
1201 | return current->euid; | 1201 | return current_euid(); |
1202 | } | 1202 | } |
1203 | 1203 | ||
1204 | asmlinkage long sys_getgid(void) | 1204 | asmlinkage long sys_getgid(void) |
1205 | { | 1205 | { |
1206 | /* Only we change this so SMP safe */ | 1206 | /* Only we change this so SMP safe */ |
1207 | return current->gid; | 1207 | return current_gid(); |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | asmlinkage long sys_getegid(void) | 1210 | asmlinkage long sys_getegid(void) |
1211 | { | 1211 | { |
1212 | /* Only we change this so SMP safe */ | 1212 | /* Only we change this so SMP safe */ |
1213 | return current->egid; | 1213 | return current_egid(); |
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | #endif | 1216 | #endif |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 33dbefd471e8..e2a4ff6fc3a6 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -3,18 +3,34 @@ | |||
3 | # select HAVE_FUNCTION_TRACER: | 3 | # select HAVE_FUNCTION_TRACER: |
4 | # | 4 | # |
5 | 5 | ||
6 | config USER_STACKTRACE_SUPPORT | ||
7 | bool | ||
8 | |||
6 | config NOP_TRACER | 9 | config NOP_TRACER |
7 | bool | 10 | bool |
8 | 11 | ||
9 | config HAVE_FUNCTION_TRACER | 12 | config HAVE_FUNCTION_TRACER |
10 | bool | 13 | bool |
11 | 14 | ||
15 | config HAVE_FUNCTION_GRAPH_TRACER | ||
16 | bool | ||
17 | |||
18 | config HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
19 | bool | ||
20 | help | ||
21 | This gets selected when the arch tests the function_trace_stop | ||
22 | variable at the mcount call site. Otherwise, this variable | ||
23 | is tested by the called function. | ||
24 | |||
12 | config HAVE_DYNAMIC_FTRACE | 25 | config HAVE_DYNAMIC_FTRACE |
13 | bool | 26 | bool |
14 | 27 | ||
15 | config HAVE_FTRACE_MCOUNT_RECORD | 28 | config HAVE_FTRACE_MCOUNT_RECORD |
16 | bool | 29 | bool |
17 | 30 | ||
31 | config HAVE_HW_BRANCH_TRACER | ||
32 | bool | ||
33 | |||
18 | config TRACER_MAX_TRACE | 34 | config TRACER_MAX_TRACE |
19 | bool | 35 | bool |
20 | 36 | ||
@@ -47,6 +63,20 @@ config FUNCTION_TRACER | |||
47 | (the bootup default), then the overhead of the instructions is very | 63 | (the bootup default), then the overhead of the instructions is very |
48 | small and not measurable even in micro-benchmarks. | 64 | small and not measurable even in micro-benchmarks. |
49 | 65 | ||
66 | config FUNCTION_GRAPH_TRACER | ||
67 | bool "Kernel Function Graph Tracer" | ||
68 | depends on HAVE_FUNCTION_GRAPH_TRACER | ||
69 | depends on FUNCTION_TRACER | ||
70 | default y | ||
71 | help | ||
72 | Enable the kernel to trace a function at both its entry | ||
73 | and its return. | ||
74 | Its first purpose is to trace the duration of functions and | ||
75 | draw a call graph for each thread with some information like | ||
76 | the return value. | ||
77 | This is done by saving each function's return address into a | ||
78 | stack of calls kept on the current task structure. | ||
79 | |||
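Conceptually, the graph tracer's entry hook pushes the real return address onto this per-task stack and substitutes a trampoline; when the function returns, the trampoline records the exit time and jumps to the saved address. A sketch of one stack entry (field names are illustrative, not the actual structure layout):

        struct ret_stack_entry {
                unsigned long ret;              /* original return address */
                unsigned long long calltime;    /* entry timestamp, for duration */
        };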
50 | config IRQSOFF_TRACER | 80 | config IRQSOFF_TRACER |
51 | bool "Interrupts-off Latency Tracer" | 81 | bool "Interrupts-off Latency Tracer" |
52 | default n | 82 | default n |
@@ -138,6 +168,70 @@ config BOOT_TRACER | |||
138 | selected, because the self-tests are an initcall as well and that | 168 | selected, because the self-tests are an initcall as well and that |
139 | would invalidate the boot trace. ) | 169 | would invalidate the boot trace. ) |
140 | 170 | ||
171 | config TRACE_BRANCH_PROFILING | ||
172 | bool "Trace likely/unlikely profiler" | ||
173 | depends on DEBUG_KERNEL | ||
174 | select TRACING | ||
175 | help | ||
176 | This tracer profiles all the likely and unlikely macros | ||
177 | in the kernel. It will display the results in: | ||
178 | |||
179 | /debugfs/tracing/profile_annotated_branch | ||
180 | |||
181 | Note: this will add a significant overhead, only turn this | ||
182 | on if you need to profile the system's use of these macros. | ||
183 | |||
184 | Say N if unsure. | ||
185 | |||
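Any annotated branch is tallied per call site once the profiler is on, for example (fast_path()/slow_path() are hypothetical):

        if (likely(ptr != NULL))        /* hit/miss counted at this site */
                fast_path(ptr);
        else
                slow_path();

The correct/incorrect counts then appear in the profile_annotated_branch file named above.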
186 | config PROFILE_ALL_BRANCHES | ||
187 | bool "Profile all if conditionals" | ||
188 | depends on TRACE_BRANCH_PROFILING | ||
189 | help | ||
190 | This tracer profiles all branch conditions. Every if () | ||
191 | in the kernel is recorded, whether it was taken or not. | ||
192 | The results will be displayed in: | ||
193 | |||
194 | /debugfs/tracing/profile_branch | ||
195 | |||
196 | This configuration, when enabled, will impose a great overhead | ||
197 | on the system. This should only be enabled when the system | ||
198 | is to be analyzed. | ||
199 | |||
200 | Say N if unsure. | ||
201 | |||
202 | config TRACING_BRANCHES | ||
203 | bool | ||
204 | help | ||
205 | Selected by tracers that will trace the likely and unlikely | ||
206 | conditions. This prevents the tracers themselves from being | ||
207 | profiled. Profiling the tracing infrastructure can only happen | ||
208 | when the likely and unlikely branches are not being traced. | ||
209 | |||
210 | config BRANCH_TRACER | ||
211 | bool "Trace likely/unlikely instances" | ||
212 | depends on TRACE_BRANCH_PROFILING | ||
213 | select TRACING_BRANCHES | ||
214 | help | ||
215 | This traces the events of likely and unlikely condition | ||
216 | calls in the kernel. The difference between this and the | ||
217 | "Trace likely/unlikely profiler" is that this is not a | ||
218 | histogram of the callers, but actually places the calling | ||
219 | events into a running trace buffer to see when and where the | ||
220 | events happened, as well as their results. | ||
221 | |||
222 | Say N if unsure. | ||
223 | |||
224 | config POWER_TRACER | ||
225 | bool "Trace power consumption behavior" | ||
226 | depends on DEBUG_KERNEL | ||
227 | depends on X86 | ||
228 | select TRACING | ||
229 | help | ||
230 | This tracer helps developers to analyze and optimize the kernel's | ||
231 | power management decisions, specifically the C-state and P-state | ||
232 | behavior. | ||
233 | |||
234 | |||
141 | config STACK_TRACER | 235 | config STACK_TRACER |
142 | bool "Trace max stack" | 236 | bool "Trace max stack" |
143 | depends on HAVE_FUNCTION_TRACER | 237 | depends on HAVE_FUNCTION_TRACER |
@@ -150,13 +244,26 @@ config STACK_TRACER | |||
150 | 244 | ||
151 | This tracer works by hooking into every function call that the | 245 | This tracer works by hooking into every function call that the |
152 | kernel executes, and keeping a maximum stack depth value and | 246 | kernel executes, and keeping a maximum stack depth value and |
153 | stack-trace saved. Because this logic has to execute in every | 247 | stack-trace saved. If this is configured with DYNAMIC_FTRACE |
154 | kernel function, all the time, this option can slow down the | 248 | then it will not have any overhead while the stack tracer |
155 | kernel measurably and is generally intended for kernel | 249 | is disabled. |
156 | developers only. | 250 | |
251 | To enable the stack tracer on bootup, pass in 'stacktrace' | ||
252 | on the kernel command line. | ||
253 | |||
254 | The stack tracer can also be enabled or disabled via the | ||
255 | sysctl kernel.stack_tracer_enabled | ||
157 | 256 | ||
158 | Say N if unsure. | 257 | Say N if unsure. |
159 | 258 | ||
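In practice the two control paths mentioned above look like this (sysctl name as registered in kernel/sysctl.c earlier in this patch):

        # at boot: append 'stacktrace' to the kernel command line
        # at run time:
        sysctl kernel.stack_tracer_enabled=1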
259 | config HW_BRANCH_TRACER | ||
260 | depends on HAVE_HW_BRANCH_TRACER | ||
261 | bool "Trace hw branches" | ||
262 | select TRACING | ||
263 | help | ||
264 | This tracer records all branches on the system in a circular | ||
265 | buffer giving access to the last N branches for each cpu. | ||
266 | |||
160 | config DYNAMIC_FTRACE | 267 | config DYNAMIC_FTRACE |
161 | bool "enable/disable ftrace tracepoints dynamically" | 268 | bool "enable/disable ftrace tracepoints dynamically" |
162 | depends on FUNCTION_TRACER | 269 | depends on FUNCTION_TRACER |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index c8228b1a49e9..349d5a93653f 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -10,6 +10,11 @@ CFLAGS_trace_selftest_dynamic.o = -pg | |||
10 | obj-y += trace_selftest_dynamic.o | 10 | obj-y += trace_selftest_dynamic.o |
11 | endif | 11 | endif |
12 | 12 | ||
13 | # If unlikely tracing is enabled, do not trace these files | ||
14 | ifdef CONFIG_TRACING_BRANCHES | ||
15 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING | ||
16 | endif | ||
17 | |||
13 | obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o | 18 | obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o |
14 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o | 19 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o |
15 | 20 | ||
@@ -24,5 +29,9 @@ obj-$(CONFIG_NOP_TRACER) += trace_nop.o | |||
24 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o | 29 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o |
25 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o | 30 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o |
26 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o | 31 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o |
32 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o | ||
33 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | ||
34 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o | ||
35 | obj-$(CONFIG_POWER_TRACER) += trace_power.o | ||
27 | 36 | ||
28 | libftrace-y := ftrace.o | 37 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 78db083390f0..2f32969c09df 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -47,6 +47,13 @@ | |||
47 | int ftrace_enabled __read_mostly; | 47 | int ftrace_enabled __read_mostly; |
48 | static int last_ftrace_enabled; | 48 | static int last_ftrace_enabled; |
49 | 49 | ||
50 | /* set when tracing only a pid */ | ||
51 | struct pid *ftrace_pid_trace; | ||
52 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
53 | |||
54 | /* Quick disabling of function tracer. */ | ||
55 | int function_trace_stop; | ||
56 | |||
50 | /* | 57 | /* |
51 | * ftrace_disabled is set when an anomaly is discovered. | 58 | * ftrace_disabled is set when an anomaly is discovered. |
52 | * ftrace_disabled is much stronger than ftrace_enabled. | 59 | * ftrace_disabled is much stronger than ftrace_enabled. |
@@ -55,6 +62,7 @@ static int ftrace_disabled __read_mostly; | |||
55 | 62 | ||
56 | static DEFINE_SPINLOCK(ftrace_lock); | 63 | static DEFINE_SPINLOCK(ftrace_lock); |
57 | static DEFINE_MUTEX(ftrace_sysctl_lock); | 64 | static DEFINE_MUTEX(ftrace_sysctl_lock); |
65 | static DEFINE_MUTEX(ftrace_start_lock); | ||
58 | 66 | ||
59 | static struct ftrace_ops ftrace_list_end __read_mostly = | 67 | static struct ftrace_ops ftrace_list_end __read_mostly = |
60 | { | 68 | { |
@@ -63,6 +71,8 @@ static struct ftrace_ops ftrace_list_end __read_mostly = | |||
63 | 71 | ||
64 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; | 72 | static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end; |
65 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 73 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
74 | ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; | ||
75 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | ||
66 | 76 | ||
67 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | 77 | static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) |
68 | { | 78 | { |
@@ -79,6 +89,21 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | |||
79 | }; | 89 | }; |
80 | } | 90 | } |
81 | 91 | ||
92 | static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) | ||
93 | { | ||
94 | if (!test_tsk_trace_trace(current)) | ||
95 | return; | ||
96 | |||
97 | ftrace_pid_function(ip, parent_ip); | ||
98 | } | ||
99 | |||
100 | static void set_ftrace_pid_function(ftrace_func_t func) | ||
101 | { | ||
102 | /* do not set ftrace_pid_function to itself! */ | ||
103 | if (func != ftrace_pid_func) | ||
104 | ftrace_pid_function = func; | ||
105 | } | ||
106 | |||
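ftrace_pid_func() above is the per-call filter: the traced function runs only for tasks flagged as traced for the selected pid. The corresponding control file is a tracing debugfs entry (path assumes debugfs at /debugfs, as in the Kconfig help texts):

        echo 1234 > /debugfs/tracing/set_ftrace_pid    # trace only pid 1234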
82 | /** | 107 | /** |
83 | * clear_ftrace_function - reset the ftrace function | 108 | * clear_ftrace_function - reset the ftrace function |
84 | * | 109 | * |
@@ -88,7 +113,23 @@ static void ftrace_list_func(unsigned long ip, unsigned long parent_ip) | |||
88 | void clear_ftrace_function(void) | 113 | void clear_ftrace_function(void) |
89 | { | 114 | { |
90 | ftrace_trace_function = ftrace_stub; | 115 | ftrace_trace_function = ftrace_stub; |
116 | __ftrace_trace_function = ftrace_stub; | ||
117 | ftrace_pid_function = ftrace_stub; | ||
118 | } | ||
119 | |||
120 | #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
121 | /* | ||
122 | * For those archs that do not test ftrace_trace_stop in their | ||
123 | * mcount call site, we need to do it from C. | ||
124 | */ | ||
125 | static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | ||
126 | { | ||
127 | if (function_trace_stop) | ||
128 | return; | ||
129 | |||
130 | __ftrace_trace_function(ip, parent_ip); | ||
91 | } | 131 | } |
132 | #endif | ||
92 | 133 | ||
93 | static int __register_ftrace_function(struct ftrace_ops *ops) | 134 | static int __register_ftrace_function(struct ftrace_ops *ops) |
94 | { | 135 | { |
@@ -106,14 +147,28 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
106 | ftrace_list = ops; | 147 | ftrace_list = ops; |
107 | 148 | ||
108 | if (ftrace_enabled) { | 149 | if (ftrace_enabled) { |
150 | ftrace_func_t func; | ||
151 | |||
152 | if (ops->next == &ftrace_list_end) | ||
153 | func = ops->func; | ||
154 | else | ||
155 | func = ftrace_list_func; | ||
156 | |||
157 | if (ftrace_pid_trace) { | ||
158 | set_ftrace_pid_function(func); | ||
159 | func = ftrace_pid_func; | ||
160 | } | ||
161 | |||
109 | /* | 162 | /* |
110 | * For one func, simply call it directly. | 163 | * For one func, simply call it directly. |
111 | * For more than one func, call the chain. | 164 | * For more than one func, call the chain. |
112 | */ | 165 | */ |
113 | if (ops->next == &ftrace_list_end) | 166 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST |
114 | ftrace_trace_function = ops->func; | 167 | ftrace_trace_function = func; |
115 | else | 168 | #else |
116 | ftrace_trace_function = ftrace_list_func; | 169 | __ftrace_trace_function = func; |
170 | ftrace_trace_function = ftrace_test_stop_func; | ||
171 | #endif | ||
117 | } | 172 | } |
118 | 173 | ||
119 | spin_unlock(&ftrace_lock); | 174 | spin_unlock(&ftrace_lock); |
@@ -152,9 +207,19 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
152 | 207 | ||
153 | if (ftrace_enabled) { | 208 | if (ftrace_enabled) { |
154 | /* If we only have one func left, then call that directly */ | 209 | /* If we only have one func left, then call that directly */ |
155 | if (ftrace_list == &ftrace_list_end || | 210 | if (ftrace_list->next == &ftrace_list_end) { |
156 | ftrace_list->next == &ftrace_list_end) | 211 | ftrace_func_t func = ftrace_list->func; |
157 | ftrace_trace_function = ftrace_list->func; | 212 | |
213 | if (ftrace_pid_trace) { | ||
214 | set_ftrace_pid_function(func); | ||
215 | func = ftrace_pid_func; | ||
216 | } | ||
217 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
218 | ftrace_trace_function = func; | ||
219 | #else | ||
220 | __ftrace_trace_function = func; | ||
221 | #endif | ||
222 | } | ||
158 | } | 223 | } |
159 | 224 | ||
160 | out: | 225 | out: |
@@ -163,6 +228,36 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
163 | return ret; | 228 | return ret; |
164 | } | 229 | } |
165 | 230 | ||
231 | static void ftrace_update_pid_func(void) | ||
232 | { | ||
233 | ftrace_func_t func; | ||
234 | |||
235 | /* should not be called from interrupt context */ | ||
236 | spin_lock(&ftrace_lock); | ||
237 | |||
238 | if (ftrace_trace_function == ftrace_stub) | ||
239 | goto out; | ||
240 | |||
241 | func = ftrace_trace_function; | ||
242 | |||
243 | if (ftrace_pid_trace) { | ||
244 | set_ftrace_pid_function(func); | ||
245 | func = ftrace_pid_func; | ||
246 | } else { | ||
247 | if (func == ftrace_pid_func) | ||
248 | func = ftrace_pid_function; | ||
249 | } | ||
250 | |||
251 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
252 | ftrace_trace_function = func; | ||
253 | #else | ||
254 | __ftrace_trace_function = func; | ||
255 | #endif | ||
256 | |||
257 | out: | ||
258 | spin_unlock(&ftrace_lock); | ||
259 | } | ||
260 | |||
166 | #ifdef CONFIG_DYNAMIC_FTRACE | 261 | #ifdef CONFIG_DYNAMIC_FTRACE |
167 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 262 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
168 | # error Dynamic ftrace depends on MCOUNT_RECORD | 263 | # error Dynamic ftrace depends on MCOUNT_RECORD |
@@ -182,6 +277,8 @@ enum { | |||
182 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), | 277 | FTRACE_UPDATE_TRACE_FUNC = (1 << 2), |
183 | FTRACE_ENABLE_MCOUNT = (1 << 3), | 278 | FTRACE_ENABLE_MCOUNT = (1 << 3), |
184 | FTRACE_DISABLE_MCOUNT = (1 << 4), | 279 | FTRACE_DISABLE_MCOUNT = (1 << 4), |
280 | FTRACE_START_FUNC_RET = (1 << 5), | ||
281 | FTRACE_STOP_FUNC_RET = (1 << 6), | ||
185 | }; | 282 | }; |
186 | 283 | ||
187 | static int ftrace_filtered; | 284 | static int ftrace_filtered; |
@@ -308,7 +405,7 @@ ftrace_record_ip(unsigned long ip) | |||
308 | { | 405 | { |
309 | struct dyn_ftrace *rec; | 406 | struct dyn_ftrace *rec; |
310 | 407 | ||
311 | if (!ftrace_enabled || ftrace_disabled) | 408 | if (ftrace_disabled) |
312 | return NULL; | 409 | return NULL; |
313 | 410 | ||
314 | rec = ftrace_alloc_dyn_node(ip); | 411 | rec = ftrace_alloc_dyn_node(ip); |
@@ -322,14 +419,51 @@ ftrace_record_ip(unsigned long ip) | |||
322 | return rec; | 419 | return rec; |
323 | } | 420 | } |
324 | 421 | ||
325 | #define FTRACE_ADDR ((long)(ftrace_caller)) | 422 | static void print_ip_ins(const char *fmt, unsigned char *p) |
423 | { | ||
424 | int i; | ||
425 | |||
426 | printk(KERN_CONT "%s", fmt); | ||
427 | |||
428 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | ||
429 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | ||
430 | } | ||
431 | |||
432 | static void ftrace_bug(int failed, unsigned long ip) | ||
433 | { | ||
434 | switch (failed) { | ||
435 | case -EFAULT: | ||
436 | FTRACE_WARN_ON_ONCE(1); | ||
437 | pr_info("ftrace faulted on modifying "); | ||
438 | print_ip_sym(ip); | ||
439 | break; | ||
440 | case -EINVAL: | ||
441 | FTRACE_WARN_ON_ONCE(1); | ||
442 | pr_info("ftrace failed to modify "); | ||
443 | print_ip_sym(ip); | ||
444 | print_ip_ins(" actual: ", (unsigned char *)ip); | ||
445 | printk(KERN_CONT "\n"); | ||
446 | break; | ||
447 | case -EPERM: | ||
448 | FTRACE_WARN_ON_ONCE(1); | ||
449 | pr_info("ftrace faulted on writing "); | ||
450 | print_ip_sym(ip); | ||
451 | break; | ||
452 | default: | ||
453 | FTRACE_WARN_ON_ONCE(1); | ||
454 | pr_info("ftrace faulted on unknown error "); | ||
455 | print_ip_sym(ip); | ||
456 | } | ||
457 | } | ||
458 | |||
326 | 459 | ||
327 | static int | 460 | static int |
328 | __ftrace_replace_code(struct dyn_ftrace *rec, | 461 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
329 | unsigned char *nop, int enable) | ||
330 | { | 462 | { |
331 | unsigned long ip, fl; | 463 | unsigned long ip, fl; |
332 | unsigned char *call, *old, *new; | 464 | unsigned long ftrace_addr; |
465 | |||
466 | ftrace_addr = (unsigned long)ftrace_caller; | ||
333 | 467 | ||
334 | ip = rec->ip; | 468 | ip = rec->ip; |
335 | 469 | ||
@@ -388,34 +522,28 @@ __ftrace_replace_code(struct dyn_ftrace *rec, | |||
388 | } | 522 | } |
389 | } | 523 | } |
390 | 524 | ||
391 | call = ftrace_call_replace(ip, FTRACE_ADDR); | 525 | if (rec->flags & FTRACE_FL_ENABLED) |
392 | 526 | return ftrace_make_call(rec, ftrace_addr); | |
393 | if (rec->flags & FTRACE_FL_ENABLED) { | 527 | else |
394 | old = nop; | 528 | return ftrace_make_nop(NULL, rec, ftrace_addr); |
395 | new = call; | ||
396 | } else { | ||
397 | old = call; | ||
398 | new = nop; | ||
399 | } | ||
400 | |||
401 | return ftrace_modify_code(ip, old, new); | ||
402 | } | 529 | } |
403 | 530 | ||
404 | static void ftrace_replace_code(int enable) | 531 | static void ftrace_replace_code(int enable) |
405 | { | 532 | { |
406 | int i, failed; | 533 | int i, failed; |
407 | unsigned char *nop = NULL; | ||
408 | struct dyn_ftrace *rec; | 534 | struct dyn_ftrace *rec; |
409 | struct ftrace_page *pg; | 535 | struct ftrace_page *pg; |
410 | 536 | ||
411 | nop = ftrace_nop_replace(); | ||
412 | |||
413 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 537 | for (pg = ftrace_pages_start; pg; pg = pg->next) { |
414 | for (i = 0; i < pg->index; i++) { | 538 | for (i = 0; i < pg->index; i++) { |
415 | rec = &pg->records[i]; | 539 | rec = &pg->records[i]; |
416 | 540 | ||
417 | /* don't modify code that has already faulted */ | 541 | /* |
418 | if (rec->flags & FTRACE_FL_FAILED) | 542 | * Skip over free records and records that have |
543 | * failed. | ||
544 | */ | ||
545 | if (rec->flags & FTRACE_FL_FREE || | ||
546 | rec->flags & FTRACE_FL_FAILED) | ||
419 | continue; | 547 | continue; |
420 | 548 | ||
421 | /* ignore updates to this record's mcount site */ | 549 | /* ignore updates to this record's mcount site */ |
@@ -426,68 +554,30 @@ static void ftrace_replace_code(int enable) | |||
426 | unfreeze_record(rec); | 554 | unfreeze_record(rec); |
427 | } | 555 | } |
428 | 556 | ||
429 | failed = __ftrace_replace_code(rec, nop, enable); | 557 | failed = __ftrace_replace_code(rec, enable); |
430 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { | 558 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { |
431 | rec->flags |= FTRACE_FL_FAILED; | 559 | rec->flags |= FTRACE_FL_FAILED; |
432 | if ((system_state == SYSTEM_BOOTING) || | 560 | if ((system_state == SYSTEM_BOOTING) || |
433 | !core_kernel_text(rec->ip)) { | 561 | !core_kernel_text(rec->ip)) { |
434 | ftrace_free_rec(rec); | 562 | ftrace_free_rec(rec); |
435 | } | 563 | } else |
564 | ftrace_bug(failed, rec->ip); | ||
436 | } | 565 | } |
437 | } | 566 | } |
438 | } | 567 | } |
439 | } | 568 | } |
440 | 569 | ||
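__ftrace_replace_code() now delegates instruction construction to two per-architecture hooks, ftrace_make_call() and ftrace_make_nop(). A hedged sketch of the contract, reusing the ftrace_modify_code() primitive the old code called directly (arch_call_insn() and arch_nop_insn() are hypothetical stand-ins for an architecture's opcode builders):

        int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
                            unsigned long addr)
        {
                /* expected old bytes: a call to addr at rec->ip */
                unsigned char *old = arch_call_insn(rec->ip, addr);
                unsigned char *new = arch_nop_insn();

                return ftrace_modify_code(rec->ip, old, new);
        }

ftrace_make_call() is the mirror image: old is the nop, new is the call to addr.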
441 | static void print_ip_ins(const char *fmt, unsigned char *p) | ||
442 | { | ||
443 | int i; | ||
444 | |||
445 | printk(KERN_CONT "%s", fmt); | ||
446 | |||
447 | for (i = 0; i < MCOUNT_INSN_SIZE; i++) | ||
448 | printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]); | ||
449 | } | ||
450 | |||
451 | static int | 570 | static int |
452 | ftrace_code_disable(struct dyn_ftrace *rec) | 571 | ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) |
453 | { | 572 | { |
454 | unsigned long ip; | 573 | unsigned long ip; |
455 | unsigned char *nop, *call; | ||
456 | int ret; | 574 | int ret; |
457 | 575 | ||
458 | ip = rec->ip; | 576 | ip = rec->ip; |
459 | 577 | ||
460 | nop = ftrace_nop_replace(); | 578 | ret = ftrace_make_nop(mod, rec, mcount_addr); |
461 | call = ftrace_call_replace(ip, mcount_addr); | ||
462 | |||
463 | ret = ftrace_modify_code(ip, call, nop); | ||
464 | if (ret) { | 579 | if (ret) { |
465 | switch (ret) { | 580 | ftrace_bug(ret, ip); |
466 | case -EFAULT: | ||
467 | FTRACE_WARN_ON_ONCE(1); | ||
468 | pr_info("ftrace faulted on modifying "); | ||
469 | print_ip_sym(ip); | ||
470 | break; | ||
471 | case -EINVAL: | ||
472 | FTRACE_WARN_ON_ONCE(1); | ||
473 | pr_info("ftrace failed to modify "); | ||
474 | print_ip_sym(ip); | ||
475 | print_ip_ins(" expected: ", call); | ||
476 | print_ip_ins(" actual: ", (unsigned char *)ip); | ||
477 | print_ip_ins(" replace: ", nop); | ||
478 | printk(KERN_CONT "\n"); | ||
479 | break; | ||
480 | case -EPERM: | ||
481 | FTRACE_WARN_ON_ONCE(1); | ||
482 | pr_info("ftrace faulted on writing "); | ||
483 | print_ip_sym(ip); | ||
484 | break; | ||
485 | default: | ||
486 | FTRACE_WARN_ON_ONCE(1); | ||
487 | pr_info("ftrace faulted on unknown error "); | ||
488 | print_ip_sym(ip); | ||
489 | } | ||
490 | |||
491 | rec->flags |= FTRACE_FL_FAILED; | 581 | rec->flags |= FTRACE_FL_FAILED; |
492 | return 0; | 582 | return 0; |
493 | } | 583 | } |
@@ -506,6 +596,11 @@ static int __ftrace_modify_code(void *data) | |||
506 | if (*command & FTRACE_UPDATE_TRACE_FUNC) | 596 | if (*command & FTRACE_UPDATE_TRACE_FUNC) |
507 | ftrace_update_ftrace_func(ftrace_trace_function); | 597 | ftrace_update_ftrace_func(ftrace_trace_function); |
508 | 598 | ||
599 | if (*command & FTRACE_START_FUNC_RET) | ||
600 | ftrace_enable_ftrace_graph_caller(); | ||
601 | else if (*command & FTRACE_STOP_FUNC_RET) | ||
602 | ftrace_disable_ftrace_graph_caller(); | ||
603 | |||
509 | return 0; | 604 | return 0; |
510 | } | 605 | } |
511 | 606 | ||
@@ -515,43 +610,43 @@ static void ftrace_run_update_code(int command) | |||
515 | } | 610 | } |
516 | 611 | ||
517 | static ftrace_func_t saved_ftrace_func; | 612 | static ftrace_func_t saved_ftrace_func; |
518 | static int ftrace_start; | 613 | static int ftrace_start_up; |
519 | static DEFINE_MUTEX(ftrace_start_lock); | ||
520 | 614 | ||
521 | static void ftrace_startup(void) | 615 | static void ftrace_startup_enable(int command) |
522 | { | 616 | { |
523 | int command = 0; | ||
524 | |||
525 | if (unlikely(ftrace_disabled)) | ||
526 | return; | ||
527 | |||
528 | mutex_lock(&ftrace_start_lock); | ||
529 | ftrace_start++; | ||
530 | command |= FTRACE_ENABLE_CALLS; | ||
531 | |||
532 | if (saved_ftrace_func != ftrace_trace_function) { | 617 | if (saved_ftrace_func != ftrace_trace_function) { |
533 | saved_ftrace_func = ftrace_trace_function; | 618 | saved_ftrace_func = ftrace_trace_function; |
534 | command |= FTRACE_UPDATE_TRACE_FUNC; | 619 | command |= FTRACE_UPDATE_TRACE_FUNC; |
535 | } | 620 | } |
536 | 621 | ||
537 | if (!command || !ftrace_enabled) | 622 | if (!command || !ftrace_enabled) |
538 | goto out; | 623 | return; |
539 | 624 | ||
540 | ftrace_run_update_code(command); | 625 | ftrace_run_update_code(command); |
541 | out: | ||
542 | mutex_unlock(&ftrace_start_lock); | ||
543 | } | 626 | } |
544 | 627 | ||
545 | static void ftrace_shutdown(void) | 628 | static void ftrace_startup(int command) |
546 | { | 629 | { |
547 | int command = 0; | 630 | if (unlikely(ftrace_disabled)) |
631 | return; | ||
632 | |||
633 | mutex_lock(&ftrace_start_lock); | ||
634 | ftrace_start_up++; | ||
635 | command |= FTRACE_ENABLE_CALLS; | ||
548 | 636 | ||
637 | ftrace_startup_enable(command); | ||
638 | |||
639 | mutex_unlock(&ftrace_start_lock); | ||
640 | } | ||
641 | |||
642 | static void ftrace_shutdown(int command) | ||
643 | { | ||
549 | if (unlikely(ftrace_disabled)) | 644 | if (unlikely(ftrace_disabled)) |
550 | return; | 645 | return; |
551 | 646 | ||
552 | mutex_lock(&ftrace_start_lock); | 647 | mutex_lock(&ftrace_start_lock); |
553 | ftrace_start--; | 648 | ftrace_start_up--; |
554 | if (!ftrace_start) | 649 | if (!ftrace_start_up) |
555 | command |= FTRACE_DISABLE_CALLS; | 650 | command |= FTRACE_DISABLE_CALLS; |
556 | 651 | ||
557 | if (saved_ftrace_func != ftrace_trace_function) { | 652 | if (saved_ftrace_func != ftrace_trace_function) { |
@@ -577,8 +672,8 @@ static void ftrace_startup_sysctl(void) | |||
577 | mutex_lock(&ftrace_start_lock); | 672 | mutex_lock(&ftrace_start_lock); |
578 | /* Force update next time */ | 673 | /* Force update next time */ |
579 | saved_ftrace_func = NULL; | 674 | saved_ftrace_func = NULL; |
580 | /* ftrace_start is true if we want ftrace running */ | 675 | /* ftrace_start_up is true if we want ftrace running */ |
581 | if (ftrace_start) | 676 | if (ftrace_start_up) |
582 | command |= FTRACE_ENABLE_CALLS; | 677 | command |= FTRACE_ENABLE_CALLS; |
583 | 678 | ||
584 | ftrace_run_update_code(command); | 679 | ftrace_run_update_code(command); |
@@ -593,8 +688,8 @@ static void ftrace_shutdown_sysctl(void) | |||
593 | return; | 688 | return; |
594 | 689 | ||
595 | mutex_lock(&ftrace_start_lock); | 690 | mutex_lock(&ftrace_start_lock); |
596 | /* ftrace_start is true if ftrace is running */ | 691 | /* ftrace_start_up is true if ftrace is running */ |
597 | if (ftrace_start) | 692 | if (ftrace_start_up) |
598 | command |= FTRACE_DISABLE_CALLS; | 693 | command |= FTRACE_DISABLE_CALLS; |
599 | 694 | ||
600 | ftrace_run_update_code(command); | 695 | ftrace_run_update_code(command); |
@@ -605,7 +700,7 @@ static cycle_t ftrace_update_time; | |||
605 | static unsigned long ftrace_update_cnt; | 700 | static unsigned long ftrace_update_cnt; |
606 | unsigned long ftrace_update_tot_cnt; | 701 | unsigned long ftrace_update_tot_cnt; |
607 | 702 | ||
608 | static int ftrace_update_code(void) | 703 | static int ftrace_update_code(struct module *mod) |
609 | { | 704 | { |
610 | struct dyn_ftrace *p, *t; | 705 | struct dyn_ftrace *p, *t; |
611 | cycle_t start, stop; | 706 | cycle_t start, stop; |
@@ -622,7 +717,7 @@ static int ftrace_update_code(void) | |||
622 | list_del_init(&p->list); | 717 | list_del_init(&p->list); |
623 | 718 | ||
624 | /* convert record (i.e, patch mcount-call with NOP) */ | 719 | /* convert record (i.e, patch mcount-call with NOP) */ |
625 | if (ftrace_code_disable(p)) { | 720 | if (ftrace_code_disable(mod, p)) { |
626 | p->flags |= FTRACE_FL_CONVERTED; | 721 | p->flags |= FTRACE_FL_CONVERTED; |
627 | ftrace_update_cnt++; | 722 | ftrace_update_cnt++; |
628 | } else | 723 | } else |
@@ -690,7 +785,6 @@ enum { | |||
690 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 785 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
691 | 786 | ||
692 | struct ftrace_iterator { | 787 | struct ftrace_iterator { |
693 | loff_t pos; | ||
694 | struct ftrace_page *pg; | 788 | struct ftrace_page *pg; |
695 | unsigned idx; | 789 | unsigned idx; |
696 | unsigned flags; | 790 | unsigned flags; |
@@ -715,6 +809,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
715 | iter->pg = iter->pg->next; | 809 | iter->pg = iter->pg->next; |
716 | iter->idx = 0; | 810 | iter->idx = 0; |
717 | goto retry; | 811 | goto retry; |
812 | } else { | ||
813 | iter->idx = -1; | ||
718 | } | 814 | } |
719 | } else { | 815 | } else { |
720 | rec = &iter->pg->records[iter->idx++]; | 816 | rec = &iter->pg->records[iter->idx++]; |
@@ -737,8 +833,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
737 | } | 833 | } |
738 | spin_unlock(&ftrace_lock); | 834 | spin_unlock(&ftrace_lock); |
739 | 835 | ||
740 | iter->pos = *pos; | ||
741 | |||
742 | return rec; | 836 | return rec; |
743 | } | 837 | } |
744 | 838 | ||
@@ -746,13 +840,15 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
746 | { | 840 | { |
747 | struct ftrace_iterator *iter = m->private; | 841 | struct ftrace_iterator *iter = m->private; |
748 | void *p = NULL; | 842 | void *p = NULL; |
749 | loff_t l = -1; | ||
750 | 843 | ||
751 | if (*pos > iter->pos) | 844 | if (*pos > 0) { |
752 | *pos = iter->pos; | 845 | if (iter->idx < 0) |
846 | return p; | ||
847 | (*pos)--; | ||
848 | iter->idx--; | ||
849 | } | ||
753 | 850 | ||
754 | l = *pos; | 851 | p = t_next(m, p, pos); |
755 | p = t_next(m, p, &l); | ||
756 | 852 | ||
757 | return p; | 853 | return p; |
758 | } | 854 | } |
@@ -763,21 +859,15 @@ static void t_stop(struct seq_file *m, void *p) | |||
763 | 859 | ||
764 | static int t_show(struct seq_file *m, void *v) | 860 | static int t_show(struct seq_file *m, void *v) |
765 | { | 861 | { |
766 | struct ftrace_iterator *iter = m->private; | ||
767 | struct dyn_ftrace *rec = v; | 862 | struct dyn_ftrace *rec = v; |
768 | char str[KSYM_SYMBOL_LEN]; | 863 | char str[KSYM_SYMBOL_LEN]; |
769 | int ret = 0; | ||
770 | 864 | ||
771 | if (!rec) | 865 | if (!rec) |
772 | return 0; | 866 | return 0; |
773 | 867 | ||
774 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 868 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); |
775 | 869 | ||
776 | ret = seq_printf(m, "%s\n", str); | 870 | seq_printf(m, "%s\n", str); |
777 | if (ret < 0) { | ||
778 | iter->pos--; | ||
779 | iter->idx--; | ||
780 | } | ||
781 | 871 | ||
782 | return 0; | 872 | return 0; |
783 | } | 873 | } |
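t_show() drops its manual error bookkeeping because it no longer has anything to unwind: if seq_printf() overflows the output buffer, the seq_file core discards the partial record, enlarges its buffer, and restarts from ->start at the unconsumed position, where the rewind logic added to t_start() above replays the same record.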
@@ -803,7 +893,6 @@ ftrace_avail_open(struct inode *inode, struct file *file) | |||
803 | return -ENOMEM; | 893 | return -ENOMEM; |
804 | 894 | ||
805 | iter->pg = ftrace_pages_start; | 895 | iter->pg = ftrace_pages_start; |
806 | iter->pos = 0; | ||
807 | 896 | ||
808 | ret = seq_open(file, &show_ftrace_seq_ops); | 897 | ret = seq_open(file, &show_ftrace_seq_ops); |
809 | if (!ret) { | 898 | if (!ret) { |
@@ -890,7 +979,6 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable) | |||
890 | 979 | ||
891 | if (file->f_mode & FMODE_READ) { | 980 | if (file->f_mode & FMODE_READ) { |
892 | iter->pg = ftrace_pages_start; | 981 | iter->pg = ftrace_pages_start; |
893 | iter->pos = 0; | ||
894 | iter->flags = enable ? FTRACE_ITER_FILTER : | 982 | iter->flags = enable ? FTRACE_ITER_FILTER : |
895 | FTRACE_ITER_NOTRACE; | 983 | FTRACE_ITER_NOTRACE; |
896 | 984 | ||
@@ -959,6 +1047,13 @@ ftrace_match(unsigned char *buff, int len, int enable) | |||
959 | int type = MATCH_FULL; | 1047 | int type = MATCH_FULL; |
960 | unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1048 | unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
961 | unsigned i, match = 0, search_len = 0; | 1049 | unsigned i, match = 0, search_len = 0; |
1050 | int not = 0; | ||
1051 | |||
1052 | if (buff[0] == '!') { | ||
1053 | not = 1; | ||
1054 | buff++; | ||
1055 | len--; | ||
1056 | } | ||
962 | 1057 | ||
963 | for (i = 0; i < len; i++) { | 1058 | for (i = 0; i < len; i++) { |
964 | if (buff[i] == '*') { | 1059 | if (buff[i] == '*') { |
@@ -1012,8 +1107,12 @@ ftrace_match(unsigned char *buff, int len, int enable) | |||
1012 | matched = 1; | 1107 | matched = 1; |
1013 | break; | 1108 | break; |
1014 | } | 1109 | } |
1015 | if (matched) | 1110 | if (matched) { |
1016 | rec->flags |= flag; | 1111 | if (not) |
1112 | rec->flags &= ~flag; | ||
1113 | else | ||
1114 | rec->flags |= flag; | ||
1115 | } | ||
1017 | } | 1116 | } |
1018 | pg = pg->next; | 1117 | pg = pg->next; |
1019 | } | 1118 | } |
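A leading '!' now inverts the match: on matching records the flag is cleared instead of set. So echo 'sched*' > set_ftrace_filter followed by echo '!sched_clock' >> set_ftrace_filter leaves every sched function in the filter except sched_clock; the same negation works for set_ftrace_notrace.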
@@ -1181,7 +1280,7 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
1181 | 1280 | ||
1182 | mutex_lock(&ftrace_sysctl_lock); | 1281 | mutex_lock(&ftrace_sysctl_lock); |
1183 | mutex_lock(&ftrace_start_lock); | 1282 | mutex_lock(&ftrace_start_lock); |
1184 | if (ftrace_start && ftrace_enabled) | 1283 | if (ftrace_start_up && ftrace_enabled) |
1185 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 1284 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
1186 | mutex_unlock(&ftrace_start_lock); | 1285 | mutex_unlock(&ftrace_start_lock); |
1187 | mutex_unlock(&ftrace_sysctl_lock); | 1286 | mutex_unlock(&ftrace_sysctl_lock); |
@@ -1233,12 +1332,233 @@ static struct file_operations ftrace_notrace_fops = { | |||
1233 | .release = ftrace_notrace_release, | 1332 | .release = ftrace_notrace_release, |
1234 | }; | 1333 | }; |
1235 | 1334 | ||
1236 | static __init int ftrace_init_debugfs(void) | 1335 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1336 | |||
1337 | static DEFINE_MUTEX(graph_lock); | ||
1338 | |||
1339 | int ftrace_graph_count; | ||
1340 | unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly; | ||
1341 | |||
1342 | static void * | ||
1343 | g_next(struct seq_file *m, void *v, loff_t *pos) | ||
1237 | { | 1344 | { |
1238 | struct dentry *d_tracer; | 1345 | unsigned long *array = m->private; |
1239 | struct dentry *entry; | 1346 | int index = *pos; |
1240 | 1347 | ||
1241 | d_tracer = tracing_init_dentry(); | 1348 | (*pos)++; |
1349 | |||
1350 | if (index >= ftrace_graph_count) | ||
1351 | return NULL; | ||
1352 | |||
1353 | return &array[index]; | ||
1354 | } | ||
1355 | |||
1356 | static void *g_start(struct seq_file *m, loff_t *pos) | ||
1357 | { | ||
1358 | void *p = NULL; | ||
1359 | |||
1360 | mutex_lock(&graph_lock); | ||
1361 | |||
1362 | p = g_next(m, p, pos); | ||
1363 | |||
1364 | return p; | ||
1365 | } | ||
1366 | |||
1367 | static void g_stop(struct seq_file *m, void *p) | ||
1368 | { | ||
1369 | mutex_unlock(&graph_lock); | ||
1370 | } | ||
1371 | |||
1372 | static int g_show(struct seq_file *m, void *v) | ||
1373 | { | ||
1374 | unsigned long *ptr = v; | ||
1375 | char str[KSYM_SYMBOL_LEN]; | ||
1376 | |||
1377 | if (!ptr) | ||
1378 | return 0; | ||
1379 | |||
1380 | kallsyms_lookup(*ptr, NULL, NULL, NULL, str); | ||
1381 | |||
1382 | seq_printf(m, "%s\n", str); | ||
1383 | |||
1384 | return 0; | ||
1385 | } | ||
1386 | |||
1387 | static struct seq_operations ftrace_graph_seq_ops = { | ||
1388 | .start = g_start, | ||
1389 | .next = g_next, | ||
1390 | .stop = g_stop, | ||
1391 | .show = g_show, | ||
1392 | }; | ||
1393 | |||
1394 | static int | ||
1395 | ftrace_graph_open(struct inode *inode, struct file *file) | ||
1396 | { | ||
1397 | int ret = 0; | ||
1398 | |||
1399 | if (unlikely(ftrace_disabled)) | ||
1400 | return -ENODEV; | ||
1401 | |||
1402 | mutex_lock(&graph_lock); | ||
1403 | if ((file->f_mode & FMODE_WRITE) && | ||
1404 | !(file->f_flags & O_APPEND)) { | ||
1405 | ftrace_graph_count = 0; | ||
1406 | memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); | ||
1407 | } | ||
1408 | |||
1409 | if (file->f_mode & FMODE_READ) { | ||
1410 | ret = seq_open(file, &ftrace_graph_seq_ops); | ||
1411 | if (!ret) { | ||
1412 | struct seq_file *m = file->private_data; | ||
1413 | m->private = ftrace_graph_funcs; | ||
1414 | } | ||
1415 | } else | ||
1416 | file->private_data = ftrace_graph_funcs; | ||
1417 | mutex_unlock(&graph_lock); | ||
1418 | |||
1419 | return ret; | ||
1420 | } | ||
1421 | |||
1422 | static ssize_t | ||
1423 | ftrace_graph_read(struct file *file, char __user *ubuf, | ||
1424 | size_t cnt, loff_t *ppos) | ||
1425 | { | ||
1426 | if (file->f_mode & FMODE_READ) | ||
1427 | return seq_read(file, ubuf, cnt, ppos); | ||
1428 | else | ||
1429 | return -EPERM; | ||
1430 | } | ||
1431 | |||
1432 | static int | ||
1433 | ftrace_set_func(unsigned long *array, int idx, char *buffer) | ||
1434 | { | ||
1435 | char str[KSYM_SYMBOL_LEN]; | ||
1436 | struct dyn_ftrace *rec; | ||
1437 | struct ftrace_page *pg; | ||
1438 | int found = 0; | ||
1439 | int i, j; | ||
1440 | |||
1441 | if (ftrace_disabled) | ||
1442 | return -ENODEV; | ||
1443 | |||
1444 | /* should not be called from interrupt context */ | ||
1445 | spin_lock(&ftrace_lock); | ||
1446 | |||
1447 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | ||
1448 | for (i = 0; i < pg->index; i++) { | ||
1449 | rec = &pg->records[i]; | ||
1450 | |||
1451 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | ||
1452 | continue; | ||
1453 | |||
1454 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1455 | if (strcmp(str, buffer) == 0) { | ||
1456 | found = 1; | ||
1457 | for (j = 0; j < idx; j++) | ||
1458 | if (array[j] == rec->ip) { | ||
1459 | found = 0; | ||
1460 | break; | ||
1461 | } | ||
1462 | if (found) | ||
1463 | array[idx] = rec->ip; | ||
1464 | break; | ||
1465 | } | ||
1466 | } | ||
1467 | } | ||
1468 | spin_unlock(&ftrace_lock); | ||
1469 | |||
1470 | return found ? 0 : -EINVAL; | ||
1471 | } | ||
1472 | |||
1473 | static ssize_t | ||
1474 | ftrace_graph_write(struct file *file, const char __user *ubuf, | ||
1475 | size_t cnt, loff_t *ppos) | ||
1476 | { | ||
1477 | unsigned char buffer[FTRACE_BUFF_MAX+1]; | ||
1478 | unsigned long *array; | ||
1479 | size_t read = 0; | ||
1480 | ssize_t ret; | ||
1481 | int index = 0; | ||
1482 | char ch; | ||
1483 | |||
1484 | if (!cnt || cnt < 0) | ||
1485 | return 0; | ||
1486 | |||
1487 | mutex_lock(&graph_lock); | ||
1488 | |||
1489 | if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) { | ||
1490 | ret = -EBUSY; | ||
1491 | goto out; | ||
1492 | } | ||
1493 | |||
1494 | if (file->f_mode & FMODE_READ) { | ||
1495 | struct seq_file *m = file->private_data; | ||
1496 | array = m->private; | ||
1497 | } else | ||
1498 | array = file->private_data; | ||
1499 | |||
1500 | ret = get_user(ch, ubuf++); | ||
1501 | if (ret) | ||
1502 | goto out; | ||
1503 | read++; | ||
1504 | cnt--; | ||
1505 | |||
1506 | /* skip white space */ | ||
1507 | while (cnt && isspace(ch)) { | ||
1508 | ret = get_user(ch, ubuf++); | ||
1509 | if (ret) | ||
1510 | goto out; | ||
1511 | read++; | ||
1512 | cnt--; | ||
1513 | } | ||
1514 | |||
1515 | if (isspace(ch)) { | ||
1516 | *ppos += read; | ||
1517 | ret = read; | ||
1518 | goto out; | ||
1519 | } | ||
1520 | |||
1521 | while (cnt && !isspace(ch)) { | ||
1522 | if (index < FTRACE_BUFF_MAX) | ||
1523 | buffer[index++] = ch; | ||
1524 | else { | ||
1525 | ret = -EINVAL; | ||
1526 | goto out; | ||
1527 | } | ||
1528 | ret = get_user(ch, ubuf++); | ||
1529 | if (ret) | ||
1530 | goto out; | ||
1531 | read++; | ||
1532 | cnt--; | ||
1533 | } | ||
1534 | buffer[index] = 0; | ||
1535 | |||
1536 | /* we allow only one at a time */ | ||
1537 | ret = ftrace_set_func(array, ftrace_graph_count, buffer); | ||
1538 | if (ret) | ||
1539 | goto out; | ||
1540 | |||
1541 | ftrace_graph_count++; | ||
1542 | |||
1543 | file->f_pos += read; | ||
1544 | |||
1545 | ret = read; | ||
1546 | out: | ||
1547 | mutex_unlock(&graph_lock); | ||
1548 | |||
1549 | return ret; | ||
1550 | } | ||
1551 | |||
1552 | static const struct file_operations ftrace_graph_fops = { | ||
1553 | .open = ftrace_graph_open, | ||
1554 | .read = ftrace_graph_read, | ||
1555 | .write = ftrace_graph_write, | ||
1556 | }; | ||
1557 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
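The new set_graph_function file restricts the function graph tracer to the listed entry points, up to FTRACE_GRAPH_MAX_FUNCS (writes beyond that fail with -EBUSY). Opening it for write without O_APPEND resets the list, mirroring set_ftrace_filter, so echo do_sys_open > set_graph_function starts over while '>>' appends one name at a time; ftrace_set_func() resolves each name against the ftrace records via kallsyms and rejects both unknown names and duplicates with -EINVAL.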
1558 | |||
1559 | static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer) | ||
1560 | { | ||
1561 | struct dentry *entry; | ||
1242 | 1562 | ||
1243 | entry = debugfs_create_file("available_filter_functions", 0444, | 1563 | entry = debugfs_create_file("available_filter_functions", 0444, |
1244 | d_tracer, NULL, &ftrace_avail_fops); | 1564 | d_tracer, NULL, &ftrace_avail_fops); |
@@ -1263,12 +1583,20 @@ static __init int ftrace_init_debugfs(void) | |||
1263 | pr_warning("Could not create debugfs " | 1583 | pr_warning("Could not create debugfs " |
1264 | "'set_ftrace_notrace' entry\n"); | 1584 | "'set_ftrace_notrace' entry\n"); |
1265 | 1585 | ||
1586 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1587 | entry = debugfs_create_file("set_graph_function", 0444, d_tracer, | ||
1588 | NULL, | ||
1589 | &ftrace_graph_fops); | ||
1590 | if (!entry) | ||
1591 | pr_warning("Could not create debugfs " | ||
1592 | "'set_graph_function' entry\n"); | ||
1593 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
1594 | |||
1266 | return 0; | 1595 | return 0; |
1267 | } | 1596 | } |
1268 | 1597 | ||
1269 | fs_initcall(ftrace_init_debugfs); | 1598 | static int ftrace_convert_nops(struct module *mod, |
1270 | 1599 | unsigned long *start, | |
1271 | static int ftrace_convert_nops(unsigned long *start, | ||
1272 | unsigned long *end) | 1600 | unsigned long *end) |
1273 | { | 1601 | { |
1274 | unsigned long *p; | 1602 | unsigned long *p; |
@@ -1279,23 +1607,32 @@ static int ftrace_convert_nops(unsigned long *start, | |||
1279 | p = start; | 1607 | p = start; |
1280 | while (p < end) { | 1608 | while (p < end) { |
1281 | addr = ftrace_call_adjust(*p++); | 1609 | addr = ftrace_call_adjust(*p++); |
1610 | /* | ||
1611 | * Some architecture linkers will pad between | ||
1612 | * the different mcount_loc sections of different | ||
1613 | * object files to satisfy alignments. | ||
1614 | * Skip any NULL pointers. | ||
1615 | */ | ||
1616 | if (!addr) | ||
1617 | continue; | ||
1282 | ftrace_record_ip(addr); | 1618 | ftrace_record_ip(addr); |
1283 | } | 1619 | } |
1284 | 1620 | ||
1285 | /* disable interrupts to prevent kstop machine */ | 1621 | /* disable interrupts to prevent kstop machine */ |
1286 | local_irq_save(flags); | 1622 | local_irq_save(flags); |
1287 | ftrace_update_code(); | 1623 | ftrace_update_code(mod); |
1288 | local_irq_restore(flags); | 1624 | local_irq_restore(flags); |
1289 | mutex_unlock(&ftrace_start_lock); | 1625 | mutex_unlock(&ftrace_start_lock); |
1290 | 1626 | ||
1291 | return 0; | 1627 | return 0; |
1292 | } | 1628 | } |
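The NULL skip matters because each object file contributes its own __mcount_loc fragment and the linker may pad between fragments to satisfy alignment, leaving zero entries in the merged table. Note the loop stays safe: p was already advanced by the *p++ before the continue, so a padding entry cannot spin forever.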
1293 | 1629 | ||
1294 | void ftrace_init_module(unsigned long *start, unsigned long *end) | 1630 | void ftrace_init_module(struct module *mod, |
1631 | unsigned long *start, unsigned long *end) | ||
1295 | { | 1632 | { |
1296 | if (ftrace_disabled || start == end) | 1633 | if (ftrace_disabled || start == end) |
1297 | return; | 1634 | return; |
1298 | ftrace_convert_nops(start, end); | 1635 | ftrace_convert_nops(mod, start, end); |
1299 | } | 1636 | } |
1300 | 1637 | ||
1301 | extern unsigned long __start_mcount_loc[]; | 1638 | extern unsigned long __start_mcount_loc[]; |
@@ -1325,7 +1662,8 @@ void __init ftrace_init(void) | |||
1325 | 1662 | ||
1326 | last_ftrace_enabled = ftrace_enabled = 1; | 1663 | last_ftrace_enabled = ftrace_enabled = 1; |
1327 | 1664 | ||
1328 | ret = ftrace_convert_nops(__start_mcount_loc, | 1665 | ret = ftrace_convert_nops(NULL, |
1666 | __start_mcount_loc, | ||
1329 | __stop_mcount_loc); | 1667 | __stop_mcount_loc); |
1330 | 1668 | ||
1331 | return; | 1669 | return; |
@@ -1342,12 +1680,186 @@ static int __init ftrace_nodyn_init(void) | |||
1342 | } | 1680 | } |
1343 | device_initcall(ftrace_nodyn_init); | 1681 | device_initcall(ftrace_nodyn_init); |
1344 | 1682 | ||
1345 | # define ftrace_startup() do { } while (0) | 1683 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
1346 | # define ftrace_shutdown() do { } while (0) | 1684 | static inline void ftrace_startup_enable(int command) { } |
1685 | /* Keep as macros so we do not need to define the commands */ | ||
1686 | # define ftrace_startup(command) do { } while (0) | ||
1687 | # define ftrace_shutdown(command) do { } while (0) | ||
1347 | # define ftrace_startup_sysctl() do { } while (0) | 1688 | # define ftrace_startup_sysctl() do { } while (0) |
1348 | # define ftrace_shutdown_sysctl() do { } while (0) | 1689 | # define ftrace_shutdown_sysctl() do { } while (0) |
1349 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 1690 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
1350 | 1691 | ||
1692 | static ssize_t | ||
1693 | ftrace_pid_read(struct file *file, char __user *ubuf, | ||
1694 | size_t cnt, loff_t *ppos) | ||
1695 | { | ||
1696 | char buf[64]; | ||
1697 | int r; | ||
1698 | |||
1699 | if (ftrace_pid_trace == ftrace_swapper_pid) | ||
1700 | r = sprintf(buf, "swapper tasks\n"); | ||
1701 | else if (ftrace_pid_trace) | ||
1702 | r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace)); | ||
1703 | else | ||
1704 | r = sprintf(buf, "no pid\n"); | ||
1705 | |||
1706 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
1707 | } | ||
1708 | |||
1709 | static void clear_ftrace_swapper(void) | ||
1710 | { | ||
1711 | struct task_struct *p; | ||
1712 | int cpu; | ||
1713 | |||
1714 | get_online_cpus(); | ||
1715 | for_each_online_cpu(cpu) { | ||
1716 | p = idle_task(cpu); | ||
1717 | clear_tsk_trace_trace(p); | ||
1718 | } | ||
1719 | put_online_cpus(); | ||
1720 | } | ||
1721 | |||
1722 | static void set_ftrace_swapper(void) | ||
1723 | { | ||
1724 | struct task_struct *p; | ||
1725 | int cpu; | ||
1726 | |||
1727 | get_online_cpus(); | ||
1728 | for_each_online_cpu(cpu) { | ||
1729 | p = idle_task(cpu); | ||
1730 | set_tsk_trace_trace(p); | ||
1731 | } | ||
1732 | put_online_cpus(); | ||
1733 | } | ||
1734 | |||
1735 | static void clear_ftrace_pid(struct pid *pid) | ||
1736 | { | ||
1737 | struct task_struct *p; | ||
1738 | |||
1739 | do_each_pid_task(pid, PIDTYPE_PID, p) { | ||
1740 | clear_tsk_trace_trace(p); | ||
1741 | } while_each_pid_task(pid, PIDTYPE_PID, p); | ||
1742 | put_pid(pid); | ||
1743 | } | ||
1744 | |||
1745 | static void set_ftrace_pid(struct pid *pid) | ||
1746 | { | ||
1747 | struct task_struct *p; | ||
1748 | |||
1749 | do_each_pid_task(pid, PIDTYPE_PID, p) { | ||
1750 | set_tsk_trace_trace(p); | ||
1751 | } while_each_pid_task(pid, PIDTYPE_PID, p); | ||
1752 | } | ||
1753 | |||
1754 | static void clear_ftrace_pid_task(struct pid **pid) | ||
1755 | { | ||
1756 | if (*pid == ftrace_swapper_pid) | ||
1757 | clear_ftrace_swapper(); | ||
1758 | else | ||
1759 | clear_ftrace_pid(*pid); | ||
1760 | |||
1761 | *pid = NULL; | ||
1762 | } | ||
1763 | |||
1764 | static void set_ftrace_pid_task(struct pid *pid) | ||
1765 | { | ||
1766 | if (pid == ftrace_swapper_pid) | ||
1767 | set_ftrace_swapper(); | ||
1768 | else | ||
1769 | set_ftrace_pid(pid); | ||
1770 | } | ||
1771 | |||
1772 | static ssize_t | ||
1773 | ftrace_pid_write(struct file *filp, const char __user *ubuf, | ||
1774 | size_t cnt, loff_t *ppos) | ||
1775 | { | ||
1776 | struct pid *pid; | ||
1777 | char buf[64]; | ||
1778 | long val; | ||
1779 | int ret; | ||
1780 | |||
1781 | if (cnt >= sizeof(buf)) | ||
1782 | return -EINVAL; | ||
1783 | |||
1784 | if (copy_from_user(&buf, ubuf, cnt)) | ||
1785 | return -EFAULT; | ||
1786 | |||
1787 | buf[cnt] = 0; | ||
1788 | |||
1789 | ret = strict_strtol(buf, 10, &val); | ||
1790 | if (ret < 0) | ||
1791 | return ret; | ||
1792 | |||
1793 | mutex_lock(&ftrace_start_lock); | ||
1794 | if (val < 0) { | ||
1795 | /* disable pid tracing */ | ||
1796 | if (!ftrace_pid_trace) | ||
1797 | goto out; | ||
1798 | |||
1799 | clear_ftrace_pid_task(&ftrace_pid_trace); | ||
1800 | |||
1801 | } else { | ||
1802 | /* swapper task is special */ | ||
1803 | if (!val) { | ||
1804 | pid = ftrace_swapper_pid; | ||
1805 | if (pid == ftrace_pid_trace) | ||
1806 | goto out; | ||
1807 | } else { | ||
1808 | pid = find_get_pid(val); | ||
1809 | |||
1810 | if (pid == ftrace_pid_trace) { | ||
1811 | put_pid(pid); | ||
1812 | goto out; | ||
1813 | } | ||
1814 | } | ||
1815 | |||
1816 | if (ftrace_pid_trace) | ||
1817 | clear_ftrace_pid_task(&ftrace_pid_trace); | ||
1818 | |||
1819 | if (!pid) | ||
1820 | goto out; | ||
1821 | |||
1822 | ftrace_pid_trace = pid; | ||
1823 | |||
1824 | set_ftrace_pid_task(ftrace_pid_trace); | ||
1825 | } | ||
1826 | |||
1827 | /* update the function call */ | ||
1828 | ftrace_update_pid_func(); | ||
1829 | ftrace_startup_enable(0); | ||
1830 | |||
1831 | out: | ||
1832 | mutex_unlock(&ftrace_start_lock); | ||
1833 | |||
1834 | return cnt; | ||
1835 | } | ||
1836 | |||
1837 | static struct file_operations ftrace_pid_fops = { | ||
1838 | .read = ftrace_pid_read, | ||
1839 | .write = ftrace_pid_write, | ||
1840 | }; | ||
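The resulting set_ftrace_pid semantics, per ftrace_pid_write(): a negative value drops the pid filter, 0 selects the per-cpu idle ("swapper") tasks, which never appear as an ordinary pid, and a positive value restricts tracing to that task via find_get_pid(). For example, echo $$ > set_ftrace_pid (under the tracing debugfs directory) confines function tracing to the current shell, and echo -1 > set_ftrace_pid clears the filter again.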
1841 | |||
1842 | static __init int ftrace_init_debugfs(void) | ||
1843 | { | ||
1844 | struct dentry *d_tracer; | ||
1845 | struct dentry *entry; | ||
1846 | |||
1847 | d_tracer = tracing_init_dentry(); | ||
1848 | if (!d_tracer) | ||
1849 | return 0; | ||
1850 | |||
1851 | ftrace_init_dyn_debugfs(d_tracer); | ||
1852 | |||
1853 | entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer, | ||
1854 | NULL, &ftrace_pid_fops); | ||
1855 | if (!entry) | ||
1856 | pr_warning("Could not create debugfs " | ||
1857 | "'set_ftrace_pid' entry\n"); | ||
1858 | return 0; | ||
1859 | } | ||
1860 | |||
1861 | fs_initcall(ftrace_init_debugfs); | ||
1862 | |||
1351 | /** | 1863 | /** |
1352 | * ftrace_kill - kill ftrace | 1864 | * ftrace_kill - kill ftrace |
1353 | * | 1865 | * |
@@ -1381,10 +1893,11 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
1381 | return -1; | 1893 | return -1; |
1382 | 1894 | ||
1383 | mutex_lock(&ftrace_sysctl_lock); | 1895 | mutex_lock(&ftrace_sysctl_lock); |
1896 | |||
1384 | ret = __register_ftrace_function(ops); | 1897 | ret = __register_ftrace_function(ops); |
1385 | ftrace_startup(); | 1898 | ftrace_startup(0); |
1386 | mutex_unlock(&ftrace_sysctl_lock); | ||
1387 | 1899 | ||
1900 | mutex_unlock(&ftrace_sysctl_lock); | ||
1388 | return ret; | 1901 | return ret; |
1389 | } | 1902 | } |
1390 | 1903 | ||
@@ -1400,7 +1913,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
1400 | 1913 | ||
1401 | mutex_lock(&ftrace_sysctl_lock); | 1914 | mutex_lock(&ftrace_sysctl_lock); |
1402 | ret = __unregister_ftrace_function(ops); | 1915 | ret = __unregister_ftrace_function(ops); |
1403 | ftrace_shutdown(); | 1916 | ftrace_shutdown(0); |
1404 | mutex_unlock(&ftrace_sysctl_lock); | 1917 | mutex_unlock(&ftrace_sysctl_lock); |
1405 | 1918 | ||
1406 | return ret; | 1919 | return ret; |
@@ -1449,3 +1962,153 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1449 | return ret; | 1962 | return ret; |
1450 | } | 1963 | } |
1451 | 1964 | ||
1965 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1966 | |||
1967 | static atomic_t ftrace_graph_active; | ||
1968 | |||
1969 | int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) | ||
1970 | { | ||
1971 | return 0; | ||
1972 | } | ||
1973 | |||
1974 | /* The callbacks that hook a function */ | ||
1975 | trace_func_graph_ret_t ftrace_graph_return = | ||
1976 | (trace_func_graph_ret_t)ftrace_stub; | ||
1977 | trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub; | ||
1978 | |||
1979 | /* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */ | ||
1980 | static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list) | ||
1981 | { | ||
1982 | int i; | ||
1983 | int ret = 0; | ||
1984 | unsigned long flags; | ||
1985 | int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE; | ||
1986 | struct task_struct *g, *t; | ||
1987 | |||
1988 | for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) { | ||
1989 | ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH | ||
1990 | * sizeof(struct ftrace_ret_stack), | ||
1991 | GFP_KERNEL); | ||
1992 | if (!ret_stack_list[i]) { | ||
1993 | start = 0; | ||
1994 | end = i; | ||
1995 | ret = -ENOMEM; | ||
1996 | goto free; | ||
1997 | } | ||
1998 | } | ||
1999 | |||
2000 | read_lock_irqsave(&tasklist_lock, flags); | ||
2001 | do_each_thread(g, t) { | ||
2002 | if (start == end) { | ||
2003 | ret = -EAGAIN; | ||
2004 | goto unlock; | ||
2005 | } | ||
2006 | |||
2007 | if (t->ret_stack == NULL) { | ||
2008 | t->curr_ret_stack = -1; | ||
2009 | /* Make sure IRQs see the -1 first: */ | ||
2010 | barrier(); | ||
2011 | t->ret_stack = ret_stack_list[start++]; | ||
2012 | atomic_set(&t->tracing_graph_pause, 0); | ||
2013 | atomic_set(&t->trace_overrun, 0); | ||
2014 | } | ||
2015 | } while_each_thread(g, t); | ||
2016 | |||
2017 | unlock: | ||
2018 | read_unlock_irqrestore(&tasklist_lock, flags); | ||
2019 | free: | ||
2020 | for (i = start; i < end; i++) | ||
2021 | kfree(ret_stack_list[i]); | ||
2022 | return ret; | ||
2023 | } | ||
2024 | |||
2025 | /* Allocate a return stack for each task */ | ||
2026 | static int start_graph_tracing(void) | ||
2027 | { | ||
2028 | struct ftrace_ret_stack **ret_stack_list; | ||
2029 | int ret; | ||
2030 | |||
2031 | ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE * | ||
2032 | sizeof(struct ftrace_ret_stack *), | ||
2033 | GFP_KERNEL); | ||
2034 | |||
2035 | if (!ret_stack_list) | ||
2036 | return -ENOMEM; | ||
2037 | |||
2038 | do { | ||
2039 | ret = alloc_retstack_tasklist(ret_stack_list); | ||
2040 | } while (ret == -EAGAIN); | ||
2041 | |||
2042 | kfree(ret_stack_list); | ||
2043 | return ret; | ||
2044 | } | ||
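The two-level allocation dance exists because kmalloc() may sleep while tasklist_lock is held as a reader with IRQs off. So return stacks are allocated in batches of FTRACE_RETSTACK_ALLOC_SIZE outside the lock, then handed out under it; if the batch runs dry mid-walk, alloc_retstack_tasklist() returns -EAGAIN and the loop above allocates a fresh batch. The retry converges because tasks that already received a stack are skipped on the next pass (t->ret_stack != NULL).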
2045 | |||
2046 | int register_ftrace_graph(trace_func_graph_ret_t retfunc, | ||
2047 | trace_func_graph_ent_t entryfunc) | ||
2048 | { | ||
2049 | int ret = 0; | ||
2050 | |||
2051 | mutex_lock(&ftrace_sysctl_lock); | ||
2052 | |||
2053 | atomic_inc(&ftrace_graph_active); | ||
2054 | ret = start_graph_tracing(); | ||
2055 | if (ret) { | ||
2056 | atomic_dec(&ftrace_graph_active); | ||
2057 | goto out; | ||
2058 | } | ||
2059 | |||
2060 | ftrace_graph_return = retfunc; | ||
2061 | ftrace_graph_entry = entryfunc; | ||
2062 | |||
2063 | ftrace_startup(FTRACE_START_FUNC_RET); | ||
2064 | |||
2065 | out: | ||
2066 | mutex_unlock(&ftrace_sysctl_lock); | ||
2067 | return ret; | ||
2068 | } | ||
2069 | |||
2070 | void unregister_ftrace_graph(void) | ||
2071 | { | ||
2072 | mutex_lock(&ftrace_sysctl_lock); | ||
2073 | |||
2074 | atomic_dec(&ftrace_graph_active); | ||
2075 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | ||
2076 | ftrace_graph_entry = ftrace_graph_entry_stub; | ||
2077 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | ||
2078 | |||
2079 | mutex_unlock(&ftrace_sysctl_lock); | ||
2080 | } | ||
2081 | |||
2082 | /* Allocate a return stack for newly created task */ | ||
2083 | void ftrace_graph_init_task(struct task_struct *t) | ||
2084 | { | ||
2085 | if (atomic_read(&ftrace_graph_active)) { | ||
2086 | t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH | ||
2087 | * sizeof(struct ftrace_ret_stack), | ||
2088 | GFP_KERNEL); | ||
2089 | if (!t->ret_stack) | ||
2090 | return; | ||
2091 | t->curr_ret_stack = -1; | ||
2092 | atomic_set(&t->tracing_graph_pause, 0); | ||
2093 | atomic_set(&t->trace_overrun, 0); | ||
2094 | } else | ||
2095 | t->ret_stack = NULL; | ||
2096 | } | ||
2097 | |||
2098 | void ftrace_graph_exit_task(struct task_struct *t) | ||
2099 | { | ||
2100 | struct ftrace_ret_stack *ret_stack = t->ret_stack; | ||
2101 | |||
2102 | t->ret_stack = NULL; | ||
2103 | /* NULL must become visible to IRQs before we free it: */ | ||
2104 | barrier(); | ||
2105 | |||
2106 | kfree(ret_stack); | ||
2107 | } | ||
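Both lifecycle paths rely on a barrier() to order the ret_stack pointer against its contents: alloc_retstack_tasklist() sets curr_ret_stack to -1 and issues a barrier() before publishing the pointer into a live task, and ftrace_graph_exit_task() clears the pointer and issues a barrier() before kfree(). An entry hook firing from IRQ context therefore observes either no stack or a valid one, never a freed one.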
2108 | |||
2109 | void ftrace_graph_stop(void) | ||
2110 | { | ||
2111 | ftrace_stop(); | ||
2112 | } | ||
2113 | #endif | ||
2114 | |||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 668bbb5ef2bd..76f34c0ef29c 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -18,8 +18,46 @@ | |||
18 | 18 | ||
19 | #include "trace.h" | 19 | #include "trace.h" |
20 | 20 | ||
21 | /* Global flag to disable all recording to ring buffers */ | 21 | /* |
22 | static int ring_buffers_off __read_mostly; | 22 | * A fast way to enable or disable all ring buffers is to |
23 | * call tracing_on or tracing_off. Turning off the ring buffers | ||
24 | * prevents all ring buffers from being recorded to. | ||
25 | * Turning this switch on makes it OK to write to the | ||
26 | * ring buffer, if the ring buffer itself is enabled. | ||
27 | * | ||
28 | * There are three layers that must be on in order to write | ||
29 | * to the ring buffer. | ||
30 | * | ||
31 | * 1) This global flag must be set. | ||
32 | * 2) The ring buffer must be enabled for recording. | ||
33 | * 3) The per cpu buffer must be enabled for recording. | ||
34 | * | ||
35 | * In case of an anomaly, this global flag has a bit set that | ||
36 | * will permantly disable all ring buffers. | ||
37 | */ | ||
38 | |||
39 | /* | ||
40 | * Global flag to disable all recording to ring buffers | ||
41 | * This has two bits: ON, DISABLED | ||
42 | * | ||
43 | * ON DISABLED | ||
44 | * ---- ---------- | ||
45 | * 0 0 : ring buffers are off | ||
46 | * 1 0 : ring buffers are on | ||
47 | * X 1 : ring buffers are permanently disabled | ||
48 | */ | ||
49 | |||
50 | enum { | ||
51 | RB_BUFFERS_ON_BIT = 0, | ||
52 | RB_BUFFERS_DISABLED_BIT = 1, | ||
53 | }; | ||
54 | |||
55 | enum { | ||
56 | RB_BUFFERS_ON = 1 << RB_BUFFERS_ON_BIT, | ||
57 | RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, | ||
58 | }; | ||
59 | |||
60 | static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; | ||
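Note the encoding is deliberately not two independent booleans: the write path below compares the whole word against RB_BUFFERS_ON, so once tracing_off_permanent() sets the DISABLED bit the word can never equal RB_BUFFERS_ON again, no matter how often tracing_on() sets the ON bit. That is what makes the kill switch permanent.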
23 | 61 | ||
24 | /** | 62 | /** |
25 | * tracing_on - enable all tracing buffers | 63 | * tracing_on - enable all tracing buffers |
@@ -29,7 +67,7 @@ static int ring_buffers_off __read_mostly; | |||
29 | */ | 67 | */ |
30 | void tracing_on(void) | 68 | void tracing_on(void) |
31 | { | 69 | { |
32 | ring_buffers_off = 0; | 70 | set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
33 | } | 71 | } |
34 | 72 | ||
35 | /** | 73 | /** |
@@ -42,9 +80,22 @@ void tracing_on(void) | |||
42 | */ | 80 | */ |
43 | void tracing_off(void) | 81 | void tracing_off(void) |
44 | { | 82 | { |
45 | ring_buffers_off = 1; | 83 | clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags); |
46 | } | 84 | } |
47 | 85 | ||
86 | /** | ||
87 | * tracing_off_permanent - permanently disable ring buffers | ||
88 | * | ||
89 | * This function, once called, will disable all ring buffers | ||
90 | * permanently. | ||
91 | */ | ||
92 | void tracing_off_permanent(void) | ||
93 | { | ||
94 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); | ||
95 | } | ||
96 | |||
97 | #include "trace.h" | ||
98 | |||
48 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 99 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
49 | #define DEBUG_SHIFT 0 | 100 | #define DEBUG_SHIFT 0 |
50 | 101 | ||
@@ -56,7 +107,7 @@ u64 ring_buffer_time_stamp(int cpu) | |||
56 | preempt_disable_notrace(); | 107 | preempt_disable_notrace(); |
57 | /* shift to debug/test normalization and TIME_EXTENTS */ | 108 | /* shift to debug/test normalization and TIME_EXTENTS */ |
58 | time = sched_clock() << DEBUG_SHIFT; | 109 | time = sched_clock() << DEBUG_SHIFT; |
59 | preempt_enable_notrace(); | 110 | preempt_enable_no_resched_notrace(); |
60 | 111 | ||
61 | return time; | 112 | return time; |
62 | } | 113 | } |
@@ -144,20 +195,24 @@ void *ring_buffer_event_data(struct ring_buffer_event *event) | |||
144 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) | 195 | #define TS_MASK ((1ULL << TS_SHIFT) - 1) |
145 | #define TS_DELTA_TEST (~TS_MASK) | 196 | #define TS_DELTA_TEST (~TS_MASK) |
146 | 197 | ||
147 | /* | 198 | struct buffer_data_page { |
148 | * This hack stolen from mm/slob.c. | ||
149 | * We can store per page timing information in the page frame of the page. | ||
150 | * Thanks to Peter Zijlstra for suggesting this idea. | ||
151 | */ | ||
152 | struct buffer_page { | ||
153 | u64 time_stamp; /* page time stamp */ | 199 | u64 time_stamp; /* page time stamp */ |
154 | local_t write; /* index for next write */ | ||
155 | local_t commit; /* write committed index */ | 200 | local_t commit; /* write committed index */ |
201 | unsigned char data[]; /* data of buffer page */ | ||
202 | }; | ||
203 | |||
204 | struct buffer_page { | ||
205 | local_t write; /* index for next write */ | ||
156 | unsigned read; /* index for next read */ | 206 | unsigned read; /* index for next read */ |
157 | struct list_head list; /* list of free pages */ | 207 | struct list_head list; /* list of free pages */ |
158 | void *page; /* Actual data page */ | 208 | struct buffer_data_page *page; /* Actual data page */ |
159 | }; | 209 | }; |
160 | 210 | ||
211 | static void rb_init_page(struct buffer_data_page *bpage) | ||
212 | { | ||
213 | local_set(&bpage->commit, 0); | ||
214 | } | ||
215 | |||
161 | /* | 216 | /* |
162 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 217 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing |
163 | * this issue out. | 218 | * this issue out. |
@@ -179,7 +234,7 @@ static inline int test_time_stamp(u64 delta) | |||
179 | return 0; | 234 | return 0; |
180 | } | 235 | } |
181 | 236 | ||
182 | #define BUF_PAGE_SIZE PAGE_SIZE | 237 | #define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) |
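Shrinking BUF_PAGE_SIZE is the direct cost of making each page self-describing: the timestamp and commit counter now travel with the data. On a typical 64-bit build with 4 KiB pages, sizeof(struct buffer_data_page) is 16 bytes (a u64 plus a long-sized local_t; the flexible data[] array adds nothing), leaving 4096 - 16 = 4080 bytes of event payload per page.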
183 | 238 | ||
184 | /* | 239 | /* |
185 | * head_page == tail_page && head == tail then buffer is empty. | 240 | * head_page == tail_page && head == tail then buffer is empty. |
@@ -187,7 +242,8 @@ static inline int test_time_stamp(u64 delta) | |||
187 | struct ring_buffer_per_cpu { | 242 | struct ring_buffer_per_cpu { |
188 | int cpu; | 243 | int cpu; |
189 | struct ring_buffer *buffer; | 244 | struct ring_buffer *buffer; |
190 | spinlock_t lock; | 245 | spinlock_t reader_lock; /* serialize readers */ |
246 | raw_spinlock_t lock; | ||
191 | struct lock_class_key lock_key; | 247 | struct lock_class_key lock_key; |
192 | struct list_head pages; | 248 | struct list_head pages; |
193 | struct buffer_page *head_page; /* read from head */ | 249 | struct buffer_page *head_page; /* read from head */ |
@@ -202,7 +258,6 @@ struct ring_buffer_per_cpu { | |||
202 | }; | 258 | }; |
203 | 259 | ||
204 | struct ring_buffer { | 260 | struct ring_buffer { |
205 | unsigned long size; | ||
206 | unsigned pages; | 261 | unsigned pages; |
207 | unsigned flags; | 262 | unsigned flags; |
208 | int cpus; | 263 | int cpus; |
@@ -221,32 +276,16 @@ struct ring_buffer_iter { | |||
221 | u64 read_stamp; | 276 | u64 read_stamp; |
222 | }; | 277 | }; |
223 | 278 | ||
279 | /* buffer may be either ring_buffer or ring_buffer_per_cpu */ | ||
224 | #define RB_WARN_ON(buffer, cond) \ | 280 | #define RB_WARN_ON(buffer, cond) \ |
225 | do { \ | 281 | ({ \ |
226 | if (unlikely(cond)) { \ | 282 | int _____ret = unlikely(cond); \ |
227 | atomic_inc(&buffer->record_disabled); \ | 283 | if (_____ret) { \ |
228 | WARN_ON(1); \ | ||
229 | } \ | ||
230 | } while (0) | ||
231 | |||
232 | #define RB_WARN_ON_RET(buffer, cond) \ | ||
233 | do { \ | ||
234 | if (unlikely(cond)) { \ | ||
235 | atomic_inc(&buffer->record_disabled); \ | ||
236 | WARN_ON(1); \ | ||
237 | return -1; \ | ||
238 | } \ | ||
239 | } while (0) | ||
240 | |||
241 | #define RB_WARN_ON_ONCE(buffer, cond) \ | ||
242 | do { \ | ||
243 | static int once; \ | ||
244 | if (unlikely(cond) && !once) { \ | ||
245 | once++; \ | ||
246 | atomic_inc(&buffer->record_disabled); \ | 284 | atomic_inc(&buffer->record_disabled); \ |
247 | WARN_ON(1); \ | 285 | WARN_ON(1); \ |
248 | } \ | 286 | } \ |
249 | } while (0) | 287 | _____ret; \ |
288 | }) | ||
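Collapsing the three warn macros into one works because a GCC statement expression, ({ ... }), evaluates to its final statement, so the same macro can both warn and hand the condition back to the caller for an early return. A standalone userspace illustration of the idiom (macro name hypothetical):

    #include <stdio.h>

    #define CHECK_AND_REPORT(cond)                          \
    ({                                                      \
            int __ret = !!(cond);                           \
            if (__ret)                                      \
                    fprintf(stderr, "warn: %s\n", #cond);   \
            __ret;  /* value of the whole expression */     \
    })

    int main(void)
    {
            int x = 5;

            if (CHECK_AND_REPORT(x > 3))    /* prints, takes branch */
                    return 1;
            return 0;
    }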
250 | 289 | ||
251 | /** | 290 | /** |
252 | * check_pages - integrity check of buffer pages | 291 | * check_pages - integrity check of buffer pages |
@@ -258,16 +297,20 @@ struct ring_buffer_iter { | |||
258 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) | 297 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) |
259 | { | 298 | { |
260 | struct list_head *head = &cpu_buffer->pages; | 299 | struct list_head *head = &cpu_buffer->pages; |
261 | struct buffer_page *page, *tmp; | 300 | struct buffer_page *bpage, *tmp; |
262 | 301 | ||
263 | RB_WARN_ON_RET(cpu_buffer, head->next->prev != head); | 302 | if (RB_WARN_ON(cpu_buffer, head->next->prev != head)) |
264 | RB_WARN_ON_RET(cpu_buffer, head->prev->next != head); | 303 | return -1; |
304 | if (RB_WARN_ON(cpu_buffer, head->prev->next != head)) | ||
305 | return -1; | ||
265 | 306 | ||
266 | list_for_each_entry_safe(page, tmp, head, list) { | 307 | list_for_each_entry_safe(bpage, tmp, head, list) { |
267 | RB_WARN_ON_RET(cpu_buffer, | 308 | if (RB_WARN_ON(cpu_buffer, |
268 | page->list.next->prev != &page->list); | 309 | bpage->list.next->prev != &bpage->list)) |
269 | RB_WARN_ON_RET(cpu_buffer, | 310 | return -1; |
270 | page->list.prev->next != &page->list); | 311 | if (RB_WARN_ON(cpu_buffer, |
312 | bpage->list.prev->next != &bpage->list)) | ||
313 | return -1; | ||
271 | } | 314 | } |
272 | 315 | ||
273 | return 0; | 316 | return 0; |
@@ -277,22 +320,23 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
277 | unsigned nr_pages) | 320 | unsigned nr_pages) |
278 | { | 321 | { |
279 | struct list_head *head = &cpu_buffer->pages; | 322 | struct list_head *head = &cpu_buffer->pages; |
280 | struct buffer_page *page, *tmp; | 323 | struct buffer_page *bpage, *tmp; |
281 | unsigned long addr; | 324 | unsigned long addr; |
282 | LIST_HEAD(pages); | 325 | LIST_HEAD(pages); |
283 | unsigned i; | 326 | unsigned i; |
284 | 327 | ||
285 | for (i = 0; i < nr_pages; i++) { | 328 | for (i = 0; i < nr_pages; i++) { |
286 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 329 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
287 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); | 330 | GFP_KERNEL, cpu_to_node(cpu_buffer->cpu)); |
288 | if (!page) | 331 | if (!bpage) |
289 | goto free_pages; | 332 | goto free_pages; |
290 | list_add(&page->list, &pages); | 333 | list_add(&bpage->list, &pages); |
291 | 334 | ||
292 | addr = __get_free_page(GFP_KERNEL); | 335 | addr = __get_free_page(GFP_KERNEL); |
293 | if (!addr) | 336 | if (!addr) |
294 | goto free_pages; | 337 | goto free_pages; |
295 | page->page = (void *)addr; | 338 | bpage->page = (void *)addr; |
339 | rb_init_page(bpage->page); | ||
296 | } | 340 | } |
297 | 341 | ||
298 | list_splice(&pages, head); | 342 | list_splice(&pages, head); |
@@ -302,9 +346,9 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
302 | return 0; | 346 | return 0; |
303 | 347 | ||
304 | free_pages: | 348 | free_pages: |
305 | list_for_each_entry_safe(page, tmp, &pages, list) { | 349 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
306 | list_del_init(&page->list); | 350 | list_del_init(&bpage->list); |
307 | free_buffer_page(page); | 351 | free_buffer_page(bpage); |
308 | } | 352 | } |
309 | return -ENOMEM; | 353 | return -ENOMEM; |
310 | } | 354 | } |
@@ -313,7 +357,7 @@ static struct ring_buffer_per_cpu * | |||
313 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | 357 | rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) |
314 | { | 358 | { |
315 | struct ring_buffer_per_cpu *cpu_buffer; | 359 | struct ring_buffer_per_cpu *cpu_buffer; |
316 | struct buffer_page *page; | 360 | struct buffer_page *bpage; |
317 | unsigned long addr; | 361 | unsigned long addr; |
318 | int ret; | 362 | int ret; |
319 | 363 | ||
@@ -324,19 +368,21 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
324 | 368 | ||
325 | cpu_buffer->cpu = cpu; | 369 | cpu_buffer->cpu = cpu; |
326 | cpu_buffer->buffer = buffer; | 370 | cpu_buffer->buffer = buffer; |
327 | spin_lock_init(&cpu_buffer->lock); | 371 | spin_lock_init(&cpu_buffer->reader_lock); |
372 | cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
328 | INIT_LIST_HEAD(&cpu_buffer->pages); | 373 | INIT_LIST_HEAD(&cpu_buffer->pages); |
329 | 374 | ||
330 | page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()), | 375 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()), |
331 | GFP_KERNEL, cpu_to_node(cpu)); | 376 | GFP_KERNEL, cpu_to_node(cpu)); |
332 | if (!page) | 377 | if (!bpage) |
333 | goto fail_free_buffer; | 378 | goto fail_free_buffer; |
334 | 379 | ||
335 | cpu_buffer->reader_page = page; | 380 | cpu_buffer->reader_page = bpage; |
336 | addr = __get_free_page(GFP_KERNEL); | 381 | addr = __get_free_page(GFP_KERNEL); |
337 | if (!addr) | 382 | if (!addr) |
338 | goto fail_free_reader; | 383 | goto fail_free_reader; |
339 | page->page = (void *)addr; | 384 | bpage->page = (void *)addr; |
385 | rb_init_page(bpage->page); | ||
340 | 386 | ||
341 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 387 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
342 | 388 | ||
@@ -361,14 +407,14 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu) | |||
361 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) | 407 | static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) |
362 | { | 408 | { |
363 | struct list_head *head = &cpu_buffer->pages; | 409 | struct list_head *head = &cpu_buffer->pages; |
364 | struct buffer_page *page, *tmp; | 410 | struct buffer_page *bpage, *tmp; |
365 | 411 | ||
366 | list_del_init(&cpu_buffer->reader_page->list); | 412 | list_del_init(&cpu_buffer->reader_page->list); |
367 | free_buffer_page(cpu_buffer->reader_page); | 413 | free_buffer_page(cpu_buffer->reader_page); |
368 | 414 | ||
369 | list_for_each_entry_safe(page, tmp, head, list) { | 415 | list_for_each_entry_safe(bpage, tmp, head, list) { |
370 | list_del_init(&page->list); | 416 | list_del_init(&bpage->list); |
371 | free_buffer_page(page); | 417 | free_buffer_page(bpage); |
372 | } | 418 | } |
373 | kfree(cpu_buffer); | 419 | kfree(cpu_buffer); |
374 | } | 420 | } |
@@ -465,7 +511,7 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer); | |||
465 | static void | 511 | static void |
466 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | 512 | rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) |
467 | { | 513 | { |
468 | struct buffer_page *page; | 514 | struct buffer_page *bpage; |
469 | struct list_head *p; | 515 | struct list_head *p; |
470 | unsigned i; | 516 | unsigned i; |
471 | 517 | ||
@@ -473,13 +519,15 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages) | |||
473 | synchronize_sched(); | 519 | synchronize_sched(); |
474 | 520 | ||
475 | for (i = 0; i < nr_pages; i++) { | 521 | for (i = 0; i < nr_pages; i++) { |
476 | BUG_ON(list_empty(&cpu_buffer->pages)); | 522 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
523 | return; | ||
477 | p = cpu_buffer->pages.next; | 524 | p = cpu_buffer->pages.next; |
478 | page = list_entry(p, struct buffer_page, list); | 525 | bpage = list_entry(p, struct buffer_page, list); |
479 | list_del_init(&page->list); | 526 | list_del_init(&bpage->list); |
480 | free_buffer_page(page); | 527 | free_buffer_page(bpage); |
481 | } | 528 | } |
482 | BUG_ON(list_empty(&cpu_buffer->pages)); | 529 | if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages))) |
530 | return; | ||
483 | 531 | ||
484 | rb_reset_cpu(cpu_buffer); | 532 | rb_reset_cpu(cpu_buffer); |
485 | 533 | ||
@@ -493,7 +541,7 @@ static void | |||
493 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | 541 | rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, |
494 | struct list_head *pages, unsigned nr_pages) | 542 | struct list_head *pages, unsigned nr_pages) |
495 | { | 543 | { |
496 | struct buffer_page *page; | 544 | struct buffer_page *bpage; |
497 | struct list_head *p; | 545 | struct list_head *p; |
498 | unsigned i; | 546 | unsigned i; |
499 | 547 | ||
@@ -501,11 +549,12 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer, | |||
501 | synchronize_sched(); | 549 | synchronize_sched(); |
502 | 550 | ||
503 | for (i = 0; i < nr_pages; i++) { | 551 | for (i = 0; i < nr_pages; i++) { |
504 | BUG_ON(list_empty(pages)); | 552 | if (RB_WARN_ON(cpu_buffer, list_empty(pages))) |
553 | return; | ||
505 | p = pages->next; | 554 | p = pages->next; |
506 | page = list_entry(p, struct buffer_page, list); | 555 | bpage = list_entry(p, struct buffer_page, list); |
507 | list_del_init(&page->list); | 556 | list_del_init(&bpage->list); |
508 | list_add_tail(&page->list, &cpu_buffer->pages); | 557 | list_add_tail(&bpage->list, &cpu_buffer->pages); |
509 | } | 558 | } |
510 | rb_reset_cpu(cpu_buffer); | 559 | rb_reset_cpu(cpu_buffer); |
511 | 560 | ||
@@ -532,7 +581,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
532 | { | 581 | { |
533 | struct ring_buffer_per_cpu *cpu_buffer; | 582 | struct ring_buffer_per_cpu *cpu_buffer; |
534 | unsigned nr_pages, rm_pages, new_pages; | 583 | unsigned nr_pages, rm_pages, new_pages; |
535 | struct buffer_page *page, *tmp; | 584 | struct buffer_page *bpage, *tmp; |
536 | unsigned long buffer_size; | 585 | unsigned long buffer_size; |
537 | unsigned long addr; | 586 | unsigned long addr; |
538 | LIST_HEAD(pages); | 587 | LIST_HEAD(pages); |
@@ -562,7 +611,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
562 | if (size < buffer_size) { | 611 | if (size < buffer_size) { |
563 | 612 | ||
564 | /* easy case, just free pages */ | 613 | /* easy case, just free pages */ |
565 | BUG_ON(nr_pages >= buffer->pages); | 614 | if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) { |
615 | mutex_unlock(&buffer->mutex); | ||
616 | return -1; | ||
617 | } | ||
566 | 618 | ||
567 | rm_pages = buffer->pages - nr_pages; | 619 | rm_pages = buffer->pages - nr_pages; |
568 | 620 | ||
@@ -581,21 +633,26 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
581 | * add these pages to the cpu_buffers. Otherwise we just free | 633 | * add these pages to the cpu_buffers. Otherwise we just free |
582 | * them all and return -ENOMEM; | 634 | * them all and return -ENOMEM; |
583 | */ | 635 | */ |
584 | BUG_ON(nr_pages <= buffer->pages); | 636 | if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) { |
637 | mutex_unlock(&buffer->mutex); | ||
638 | return -1; | ||
639 | } | ||
640 | |||
585 | new_pages = nr_pages - buffer->pages; | 641 | new_pages = nr_pages - buffer->pages; |
586 | 642 | ||
587 | for_each_buffer_cpu(buffer, cpu) { | 643 | for_each_buffer_cpu(buffer, cpu) { |
588 | for (i = 0; i < new_pages; i++) { | 644 | for (i = 0; i < new_pages; i++) { |
589 | page = kzalloc_node(ALIGN(sizeof(*page), | 645 | bpage = kzalloc_node(ALIGN(sizeof(*bpage), |
590 | cache_line_size()), | 646 | cache_line_size()), |
591 | GFP_KERNEL, cpu_to_node(cpu)); | 647 | GFP_KERNEL, cpu_to_node(cpu)); |
592 | if (!page) | 648 | if (!bpage) |
593 | goto free_pages; | 649 | goto free_pages; |
594 | list_add(&page->list, &pages); | 650 | list_add(&bpage->list, &pages); |
595 | addr = __get_free_page(GFP_KERNEL); | 651 | addr = __get_free_page(GFP_KERNEL); |
596 | if (!addr) | 652 | if (!addr) |
597 | goto free_pages; | 653 | goto free_pages; |
598 | page->page = (void *)addr; | 654 | bpage->page = (void *)addr; |
655 | rb_init_page(bpage->page); | ||
599 | } | 656 | } |
600 | } | 657 | } |
601 | 658 | ||
@@ -604,7 +661,10 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
604 | rb_insert_pages(cpu_buffer, &pages, new_pages); | 661 | rb_insert_pages(cpu_buffer, &pages, new_pages); |
605 | } | 662 | } |
606 | 663 | ||
607 | BUG_ON(!list_empty(&pages)); | 664 | if (RB_WARN_ON(buffer, !list_empty(&pages))) { |
665 | mutex_unlock(&buffer->mutex); | ||
666 | return -1; | ||
667 | } | ||
608 | 668 | ||
609 | out: | 669 | out: |
610 | buffer->pages = nr_pages; | 670 | buffer->pages = nr_pages; |
@@ -613,9 +673,9 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size) | |||
613 | return size; | 673 | return size; |
614 | 674 | ||
615 | free_pages: | 675 | free_pages: |
616 | list_for_each_entry_safe(page, tmp, &pages, list) { | 676 | list_for_each_entry_safe(bpage, tmp, &pages, list) { |
617 | list_del_init(&page->list); | 677 | list_del_init(&bpage->list); |
618 | free_buffer_page(page); | 678 | free_buffer_page(bpage); |
619 | } | 679 | } |
620 | mutex_unlock(&buffer->mutex); | 680 | mutex_unlock(&buffer->mutex); |
621 | return -ENOMEM; | 681 | return -ENOMEM; |
@@ -626,9 +686,15 @@ static inline int rb_null_event(struct ring_buffer_event *event) | |||
626 | return event->type == RINGBUF_TYPE_PADDING; | 686 | return event->type == RINGBUF_TYPE_PADDING; |
627 | } | 687 | } |
628 | 688 | ||
629 | static inline void *__rb_page_index(struct buffer_page *page, unsigned index) | 689 | static inline void * |
690 | __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) | ||
691 | { | ||
692 | return bpage->data + index; | ||
693 | } | ||
694 | |||
695 | static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) | ||
630 | { | 696 | { |
631 | return page->page + index; | 697 | return bpage->page->data + index; |
632 | } | 698 | } |
633 | 699 | ||
634 | static inline struct ring_buffer_event * | 700 | static inline struct ring_buffer_event * |
@@ -658,7 +724,7 @@ static inline unsigned rb_page_write(struct buffer_page *bpage) | |||
658 | 724 | ||
659 | static inline unsigned rb_page_commit(struct buffer_page *bpage) | 725 | static inline unsigned rb_page_commit(struct buffer_page *bpage) |
660 | { | 726 | { |
661 | return local_read(&bpage->commit); | 727 | return local_read(&bpage->page->commit); |
662 | } | 728 | } |
663 | 729 | ||
664 | /* Size is determined by what has been committed */ | 730 | /* Size is determined by what has been committed */ |
@@ -693,7 +759,8 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) | |||
693 | head += rb_event_length(event)) { | 759 | head += rb_event_length(event)) { |
694 | 760 | ||
695 | event = __rb_page_index(cpu_buffer->head_page, head); | 761 | event = __rb_page_index(cpu_buffer->head_page, head); |
696 | BUG_ON(rb_null_event(event)); | 762 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) |
763 | return; | ||
697 | /* Only count data entries */ | 764 | /* Only count data entries */ |
698 | if (event->type != RINGBUF_TYPE_DATA) | 765 | if (event->type != RINGBUF_TYPE_DATA) |
699 | continue; | 766 | continue; |
@@ -703,14 +770,14 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer) | |||
703 | } | 770 | } |
704 | 771 | ||
705 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, | 772 | static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, |
706 | struct buffer_page **page) | 773 | struct buffer_page **bpage) |
707 | { | 774 | { |
708 | struct list_head *p = (*page)->list.next; | 775 | struct list_head *p = (*bpage)->list.next; |
709 | 776 | ||
710 | if (p == &cpu_buffer->pages) | 777 | if (p == &cpu_buffer->pages) |
711 | p = p->next; | 778 | p = p->next; |
712 | 779 | ||
713 | *page = list_entry(p, struct buffer_page, list); | 780 | *bpage = list_entry(p, struct buffer_page, list); |
714 | } | 781 | } |
715 | 782 | ||
716 | static inline unsigned | 783 | static inline unsigned |
@@ -746,16 +813,18 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
746 | addr &= PAGE_MASK; | 813 | addr &= PAGE_MASK; |
747 | 814 | ||
748 | while (cpu_buffer->commit_page->page != (void *)addr) { | 815 | while (cpu_buffer->commit_page->page != (void *)addr) { |
749 | RB_WARN_ON(cpu_buffer, | 816 | if (RB_WARN_ON(cpu_buffer, |
750 | cpu_buffer->commit_page == cpu_buffer->tail_page); | 817 | cpu_buffer->commit_page == cpu_buffer->tail_page)) |
751 | cpu_buffer->commit_page->commit = | 818 | return; |
819 | cpu_buffer->commit_page->page->commit = | ||
752 | cpu_buffer->commit_page->write; | 820 | cpu_buffer->commit_page->write; |
753 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 821 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
754 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 822 | cpu_buffer->write_stamp = |
823 | cpu_buffer->commit_page->page->time_stamp; | ||
755 | } | 824 | } |
756 | 825 | ||
757 | /* Now set the commit to the event's index */ | 826 | /* Now set the commit to the event's index */ |
758 | local_set(&cpu_buffer->commit_page->commit, index); | 827 | local_set(&cpu_buffer->commit_page->page->commit, index); |
759 | } | 828 | } |
760 | 829 | ||
761 | static inline void | 830 | static inline void |
@@ -769,25 +838,38 @@ rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | |||
769 | * back to us). This allows us to do a simple loop to | 838 | * back to us). This allows us to do a simple loop to |
770 | * assign the commit to the tail. | 839 | * assign the commit to the tail. |
771 | */ | 840 | */ |
841 | again: | ||
772 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { | 842 | while (cpu_buffer->commit_page != cpu_buffer->tail_page) { |
773 | cpu_buffer->commit_page->commit = | 843 | cpu_buffer->commit_page->page->commit = |
774 | cpu_buffer->commit_page->write; | 844 | cpu_buffer->commit_page->write; |
775 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); | 845 | rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); |
776 | cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp; | 846 | cpu_buffer->write_stamp = |
847 | cpu_buffer->commit_page->page->time_stamp; | ||
777 | /* add barrier to keep gcc from optimizing too much */ | 848 | /* add barrier to keep gcc from optimizing too much */ |
778 | barrier(); | 849 | barrier(); |
779 | } | 850 | } |
780 | while (rb_commit_index(cpu_buffer) != | 851 | while (rb_commit_index(cpu_buffer) != |
781 | rb_page_write(cpu_buffer->commit_page)) { | 852 | rb_page_write(cpu_buffer->commit_page)) { |
782 | cpu_buffer->commit_page->commit = | 853 | cpu_buffer->commit_page->page->commit = |
783 | cpu_buffer->commit_page->write; | 854 | cpu_buffer->commit_page->write; |
784 | barrier(); | 855 | barrier(); |
785 | } | 856 | } |
857 | |||
858 | /* again, keep gcc from optimizing */ | ||
859 | barrier(); | ||
860 | |||
861 | /* | ||
862 | * If an interrupt came in just after the first while loop | ||
863 | * and pushed the tail page forward, we will be left with | ||
864 | * a dangling commit that will never go forward. | ||
865 | */ | ||
866 | if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page)) | ||
867 | goto again; | ||
786 | } | 868 | } |
787 | 869 | ||
788 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | 870 | static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) |
789 | { | 871 | { |
790 | cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp; | 872 | cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp; |
791 | cpu_buffer->reader_page->read = 0; | 873 | cpu_buffer->reader_page->read = 0; |
792 | } | 874 | } |
793 | 875 | ||
@@ -806,7 +888,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter) | |||
806 | else | 888 | else |
807 | rb_inc_page(cpu_buffer, &iter->head_page); | 889 | rb_inc_page(cpu_buffer, &iter->head_page); |
808 | 890 | ||
809 | iter->read_stamp = iter->head_page->time_stamp; | 891 | iter->read_stamp = iter->head_page->page->time_stamp; |
810 | iter->head = 0; | 892 | iter->head = 0; |
811 | } | 893 | } |
812 | 894 | ||
@@ -880,12 +962,15 @@ static struct ring_buffer_event * | |||
880 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | 962 | __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, |
881 | unsigned type, unsigned long length, u64 *ts) | 963 | unsigned type, unsigned long length, u64 *ts) |
882 | { | 964 | { |
883 | struct buffer_page *tail_page, *head_page, *reader_page; | 965 | struct buffer_page *tail_page, *head_page, *reader_page, *commit_page; |
884 | unsigned long tail, write; | 966 | unsigned long tail, write; |
885 | struct ring_buffer *buffer = cpu_buffer->buffer; | 967 | struct ring_buffer *buffer = cpu_buffer->buffer; |
886 | struct ring_buffer_event *event; | 968 | struct ring_buffer_event *event; |
887 | unsigned long flags; | 969 | unsigned long flags; |
888 | 970 | ||
971 | commit_page = cpu_buffer->commit_page; | ||
972 | /* we just need to protect against interrupts */ | ||
973 | barrier(); | ||
889 | tail_page = cpu_buffer->tail_page; | 974 | tail_page = cpu_buffer->tail_page; |
890 | write = local_add_return(length, &tail_page->write); | 975 | write = local_add_return(length, &tail_page->write); |
891 | tail = write - length; | 976 | tail = write - length; |
@@ -894,7 +979,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
894 | if (write > BUF_PAGE_SIZE) { | 979 | if (write > BUF_PAGE_SIZE) { |
895 | struct buffer_page *next_page = tail_page; | 980 | struct buffer_page *next_page = tail_page; |
896 | 981 | ||
897 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 982 | local_irq_save(flags); |
983 | __raw_spin_lock(&cpu_buffer->lock); | ||
898 | 984 | ||
899 | rb_inc_page(cpu_buffer, &next_page); | 985 | rb_inc_page(cpu_buffer, &next_page); |
900 | 986 | ||
@@ -902,14 +988,15 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
902 | reader_page = cpu_buffer->reader_page; | 988 | reader_page = cpu_buffer->reader_page; |
903 | 989 | ||
904 | /* we grabbed the lock before incrementing */ | 990 | /* we grabbed the lock before incrementing */ |
905 | RB_WARN_ON(cpu_buffer, next_page == reader_page); | 991 | if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) |
992 | goto out_unlock; | ||
906 | 993 | ||
907 | /* | 994 | /* |
908 | * If for some reason, we had an interrupt storm that made | 995 | * If for some reason, we had an interrupt storm that made |
909 | * it all the way around the buffer, bail, and warn | 996 | * it all the way around the buffer, bail, and warn |
910 | * about it. | 997 | * about it. |
911 | */ | 998 | */ |
912 | if (unlikely(next_page == cpu_buffer->commit_page)) { | 999 | if (unlikely(next_page == commit_page)) { |
913 | WARN_ON_ONCE(1); | 1000 | WARN_ON_ONCE(1); |
914 | goto out_unlock; | 1001 | goto out_unlock; |
915 | } | 1002 | } |
@@ -940,12 +1027,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
940 | */ | 1027 | */ |
941 | if (tail_page == cpu_buffer->tail_page) { | 1028 | if (tail_page == cpu_buffer->tail_page) { |
942 | local_set(&next_page->write, 0); | 1029 | local_set(&next_page->write, 0); |
943 | local_set(&next_page->commit, 0); | 1030 | local_set(&next_page->page->commit, 0); |
944 | cpu_buffer->tail_page = next_page; | 1031 | cpu_buffer->tail_page = next_page; |
945 | 1032 | ||
946 | /* reread the time stamp */ | 1033 | /* reread the time stamp */ |
947 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1034 | *ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
948 | cpu_buffer->tail_page->time_stamp = *ts; | 1035 | cpu_buffer->tail_page->page->time_stamp = *ts; |
949 | } | 1036 | } |
950 | 1037 | ||
951 | /* | 1038 | /* |
@@ -970,7 +1057,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
970 | rb_set_commit_to_write(cpu_buffer); | 1057 | rb_set_commit_to_write(cpu_buffer); |
971 | } | 1058 | } |
972 | 1059 | ||
973 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1060 | __raw_spin_unlock(&cpu_buffer->lock); |
1061 | local_irq_restore(flags); | ||
974 | 1062 | ||
975 | /* fail and let the caller try again */ | 1063 | /* fail and let the caller try again */ |
976 | return ERR_PTR(-EAGAIN); | 1064 | return ERR_PTR(-EAGAIN); |
@@ -978,7 +1066,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
978 | 1066 | ||
979 | /* We reserved something on the buffer */ | 1067 | /* We reserved something on the buffer */ |
980 | 1068 | ||
981 | BUG_ON(write > BUF_PAGE_SIZE); | 1069 | if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE)) |
1070 | return NULL; | ||
982 | 1071 | ||
983 | event = __rb_page_index(tail_page, tail); | 1072 | event = __rb_page_index(tail_page, tail); |
984 | rb_update_event(event, type, length); | 1073 | rb_update_event(event, type, length); |
@@ -988,12 +1077,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
988 | * this page's time stamp. | 1077 | * this page's time stamp. |
989 | */ | 1078 | */ |
990 | if (!tail && rb_is_commit(cpu_buffer, event)) | 1079 | if (!tail && rb_is_commit(cpu_buffer, event)) |
991 | cpu_buffer->commit_page->time_stamp = *ts; | 1080 | cpu_buffer->commit_page->page->time_stamp = *ts; |
992 | 1081 | ||
993 | return event; | 1082 | return event; |
994 | 1083 | ||
995 | out_unlock: | 1084 | out_unlock: |
996 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1085 | __raw_spin_unlock(&cpu_buffer->lock); |
1086 | local_irq_restore(flags); | ||
997 | return NULL; | 1087 | return NULL; |
998 | } | 1088 | } |
999 | 1089 | ||
@@ -1038,7 +1128,7 @@ rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer, | |||
1038 | event->time_delta = *delta & TS_MASK; | 1128 | event->time_delta = *delta & TS_MASK; |
1039 | event->array[0] = *delta >> TS_SHIFT; | 1129 | event->array[0] = *delta >> TS_SHIFT; |
1040 | } else { | 1130 | } else { |
1041 | cpu_buffer->commit_page->time_stamp = *ts; | 1131 | cpu_buffer->commit_page->page->time_stamp = *ts; |
1042 | event->time_delta = 0; | 1132 | event->time_delta = 0; |
1043 | event->array[0] = 0; | 1133 | event->array[0] = 0; |
1044 | } | 1134 | } |
@@ -1076,10 +1166,8 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
1076 | * storm or we have something buggy. | 1166 | * storm or we have something buggy. |
1077 | * Bail! | 1167 | * Bail! |
1078 | */ | 1168 | */ |
1079 | if (unlikely(++nr_loops > 1000)) { | 1169 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) |
1080 | RB_WARN_ON(cpu_buffer, 1); | ||
1081 | return NULL; | 1170 | return NULL; |
1082 | } | ||
1083 | 1171 | ||
1084 | ts = ring_buffer_time_stamp(cpu_buffer->cpu); | 1172 | ts = ring_buffer_time_stamp(cpu_buffer->cpu); |
1085 | 1173 | ||
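Conversions like the one above depend on RB_WARN_ON() evaluating to the truth of its condition, so a single `if` can warn and bail out at the same time; the old form warned but then carried on. In this series the macro also stops further recording on the affected buffer instead of BUG()ing the machine. Its shape in kernel/trace/ring_buffer.c is roughly the following (paraphrased; details of the real macro may differ):

    #define RB_WARN_ON(buffer, cond)                                  \
        ({                                                            \
            int _ret = unlikely(cond);                                \
            if (_ret) {                                               \
                /* stop recording rather than corrupt the buffer */   \
                atomic_inc(&(buffer)->record_disabled);               \
                WARN_ON(1);                                           \
            }                                                         \
            _ret;  /* the value of the statement expression */        \
        })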
@@ -1175,15 +1263,14 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1175 | struct ring_buffer_event *event; | 1263 | struct ring_buffer_event *event; |
1176 | int cpu, resched; | 1264 | int cpu, resched; |
1177 | 1265 | ||
1178 | if (ring_buffers_off) | 1266 | if (ring_buffer_flags != RB_BUFFERS_ON) |
1179 | return NULL; | 1267 | return NULL; |
1180 | 1268 | ||
1181 | if (atomic_read(&buffer->record_disabled)) | 1269 | if (atomic_read(&buffer->record_disabled)) |
1182 | return NULL; | 1270 | return NULL; |
1183 | 1271 | ||
1184 | /* If we are tracing schedule, we don't want to recurse */ | 1272 | /* If we are tracing schedule, we don't want to recurse */ |
1185 | resched = need_resched(); | 1273 | resched = ftrace_preempt_disable(); |
1186 | preempt_disable_notrace(); | ||
1187 | 1274 | ||
1188 | cpu = raw_smp_processor_id(); | 1275 | cpu = raw_smp_processor_id(); |
1189 | 1276 | ||
@@ -1214,10 +1301,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, | |||
1214 | return event; | 1301 | return event; |
1215 | 1302 | ||
1216 | out: | 1303 | out: |
1217 | if (resched) | 1304 | ftrace_preempt_enable(resched); |
1218 | preempt_enable_no_resched_notrace(); | ||
1219 | else | ||
1220 | preempt_enable_notrace(); | ||
1221 | return NULL; | 1305 | return NULL; |
1222 | } | 1306 | } |
1223 | 1307 | ||
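The open-coded need_resched()/preempt_disable_notrace() pairs collapse into two helpers here and in the other hot paths below. The reason for the dance: if a reschedule was already pending when the tracer ran, re-enabling preemption the normal way would call into schedule() from a path the tracer may itself be hooking. Judging from the lines being replaced, the helpers in kernel/trace/trace.h look like this:

    /* Approximate shape of the helpers added in kernel/trace/trace.h. */
    static inline int ftrace_preempt_disable(void)
    {
        int resched;

        resched = need_resched();   /* remember a pending reschedule */
        preempt_disable_notrace();  /* untraced preempt-off */

        return resched;
    }

    static inline void ftrace_preempt_enable(int resched)
    {
        if (resched)
            /* a resched was pending: do not re-enter schedule() */
            preempt_enable_no_resched_notrace();
        else
            preempt_enable_notrace();
    }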
@@ -1259,12 +1343,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
1259 | /* | 1343 | /* |
1260 | * Only the last preempt count needs to restore preemption. | 1344 | * Only the last preempt count needs to restore preemption. |
1261 | */ | 1345 | */ |
1262 | if (preempt_count() == 1) { | 1346 | if (preempt_count() == 1) |
1263 | if (per_cpu(rb_need_resched, cpu)) | 1347 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); |
1264 | preempt_enable_no_resched_notrace(); | 1348 | else |
1265 | else | ||
1266 | preempt_enable_notrace(); | ||
1267 | } else | ||
1268 | preempt_enable_no_resched_notrace(); | 1349 | preempt_enable_no_resched_notrace(); |
1269 | 1350 | ||
1270 | return 0; | 1351 | return 0; |
@@ -1294,14 +1375,13 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1294 | int ret = -EBUSY; | 1375 | int ret = -EBUSY; |
1295 | int cpu, resched; | 1376 | int cpu, resched; |
1296 | 1377 | ||
1297 | if (ring_buffers_off) | 1378 | if (ring_buffer_flags != RB_BUFFERS_ON) |
1298 | return -EBUSY; | 1379 | return -EBUSY; |
1299 | 1380 | ||
1300 | if (atomic_read(&buffer->record_disabled)) | 1381 | if (atomic_read(&buffer->record_disabled)) |
1301 | return -EBUSY; | 1382 | return -EBUSY; |
1302 | 1383 | ||
1303 | resched = need_resched(); | 1384 | resched = ftrace_preempt_disable(); |
1304 | preempt_disable_notrace(); | ||
1305 | 1385 | ||
1306 | cpu = raw_smp_processor_id(); | 1386 | cpu = raw_smp_processor_id(); |
1307 | 1387 | ||
@@ -1327,10 +1407,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1327 | 1407 | ||
1328 | ret = 0; | 1408 | ret = 0; |
1329 | out: | 1409 | out: |
1330 | if (resched) | 1410 | ftrace_preempt_enable(resched); |
1331 | preempt_enable_no_resched_notrace(); | ||
1332 | else | ||
1333 | preempt_enable_notrace(); | ||
1334 | 1411 | ||
1335 | return ret; | 1412 | return ret; |
1336 | } | 1413 | } |
@@ -1489,14 +1566,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer) | |||
1489 | return overruns; | 1566 | return overruns; |
1490 | } | 1567 | } |
1491 | 1568 | ||
1492 | /** | 1569 | static void rb_iter_reset(struct ring_buffer_iter *iter) |
1493 | * ring_buffer_iter_reset - reset an iterator | ||
1494 | * @iter: The iterator to reset | ||
1495 | * | ||
1496 | * Resets the iterator, so that it will start from the beginning | ||
1497 | * again. | ||
1498 | */ | ||
1499 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | ||
1500 | { | 1570 | { |
1501 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 1571 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
1502 | 1572 | ||
@@ -1511,7 +1581,24 @@ void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | |||
1511 | if (iter->head) | 1581 | if (iter->head) |
1512 | iter->read_stamp = cpu_buffer->read_stamp; | 1582 | iter->read_stamp = cpu_buffer->read_stamp; |
1513 | else | 1583 | else |
1514 | iter->read_stamp = iter->head_page->time_stamp; | 1584 | iter->read_stamp = iter->head_page->page->time_stamp; |
1585 | } | ||
1586 | |||
1587 | /** | ||
1588 | * ring_buffer_iter_reset - reset an iterator | ||
1589 | * @iter: The iterator to reset | ||
1590 | * | ||
1591 | * Resets the iterator, so that it will start from the beginning | ||
1592 | * again. | ||
1593 | */ | ||
1594 | void ring_buffer_iter_reset(struct ring_buffer_iter *iter) | ||
1595 | { | ||
1596 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
1597 | unsigned long flags; | ||
1598 | |||
1599 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1600 | rb_iter_reset(iter); | ||
1601 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1515 | } | 1602 | } |
1516 | 1603 | ||
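The reset logic moves into a static rb_iter_reset() that assumes the caller already holds reader_lock, leaving the exported ring_buffer_iter_reset() as a thin locking wrapper. That split is what lets ring_buffer_read_start() further down take reader_lock (plus the raw writer lock) itself and then call the unlocked worker without deadlocking on its own lock. The idiom in a generic, compilable form, with pthreads standing in for the kernel locks (names are illustrative):

    #include <pthread.h>

    struct per_cpu_buffer {
        pthread_mutex_t reader_lock;
        long iter_head;
    };

    /* Internal worker: the caller must already hold reader_lock. */
    static void iter_reset_locked(struct per_cpu_buffer *b)
    {
        b->iter_head = 0;
    }

    /* Public entry point: takes the lock, then delegates. */
    void iter_reset(struct per_cpu_buffer *b)
    {
        pthread_mutex_lock(&b->reader_lock);
        iter_reset_locked(b);
        pthread_mutex_unlock(&b->reader_lock);
    }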
1517 | /** | 1604 | /** |
@@ -1597,7 +1684,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1597 | unsigned long flags; | 1684 | unsigned long flags; |
1598 | int nr_loops = 0; | 1685 | int nr_loops = 0; |
1599 | 1686 | ||
1600 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 1687 | local_irq_save(flags); |
1688 | __raw_spin_lock(&cpu_buffer->lock); | ||
1601 | 1689 | ||
1602 | again: | 1690 | again: |
1603 | /* | 1691 | /* |
@@ -1606,8 +1694,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1606 | * a case where we will loop three times. There should be no | 1694 | * a case where we will loop three times. There should be no |
1607 | * reason to loop four times (that I know of). | 1695 | * reason to loop four times (that I know of). |
1608 | */ | 1696 | */ |
1609 | if (unlikely(++nr_loops > 3)) { | 1697 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { |
1610 | RB_WARN_ON(cpu_buffer, 1); | ||
1611 | reader = NULL; | 1698 | reader = NULL; |
1612 | goto out; | 1699 | goto out; |
1613 | } | 1700 | } |
@@ -1619,8 +1706,9 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1619 | goto out; | 1706 | goto out; |
1620 | 1707 | ||
1621 | /* Never should we have an index greater than the size */ | 1708 | /* Never should we have an index greater than the size */ |
1622 | RB_WARN_ON(cpu_buffer, | 1709 | if (RB_WARN_ON(cpu_buffer, |
1623 | cpu_buffer->reader_page->read > rb_page_size(reader)); | 1710 | cpu_buffer->reader_page->read > rb_page_size(reader))) |
1711 | goto out; | ||
1624 | 1712 | ||
1625 | /* check if we caught up to the tail */ | 1713 | /* check if we caught up to the tail */ |
1626 | reader = NULL; | 1714 | reader = NULL; |
@@ -1637,7 +1725,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1637 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 1725 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
1638 | 1726 | ||
1639 | local_set(&cpu_buffer->reader_page->write, 0); | 1727 | local_set(&cpu_buffer->reader_page->write, 0); |
1640 | local_set(&cpu_buffer->reader_page->commit, 0); | 1728 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
1641 | 1729 | ||
1642 | /* Make the reader page now replace the head */ | 1730 | /* Make the reader page now replace the head */ |
1643 | reader->list.prev->next = &cpu_buffer->reader_page->list; | 1731 | reader->list.prev->next = &cpu_buffer->reader_page->list; |
@@ -1659,7 +1747,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
1659 | goto again; | 1747 | goto again; |
1660 | 1748 | ||
1661 | out: | 1749 | out: |
1662 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 1750 | __raw_spin_unlock(&cpu_buffer->lock); |
1751 | local_irq_restore(flags); | ||
1663 | 1752 | ||
1664 | return reader; | 1753 | return reader; |
1665 | } | 1754 | } |
@@ -1673,7 +1762,8 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) | |||
1673 | reader = rb_get_reader_page(cpu_buffer); | 1762 | reader = rb_get_reader_page(cpu_buffer); |
1674 | 1763 | ||
1675 | /* This function should not be called when buffer is empty */ | 1764 | /* This function should not be called when buffer is empty */ |
1676 | BUG_ON(!reader); | 1765 | if (RB_WARN_ON(cpu_buffer, !reader)) |
1766 | return; | ||
1677 | 1767 | ||
1678 | event = rb_reader_event(cpu_buffer); | 1768 | event = rb_reader_event(cpu_buffer); |
1679 | 1769 | ||
@@ -1700,7 +1790,9 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1700 | * Check if we are at the end of the buffer. | 1790 | * Check if we are at the end of the buffer. |
1701 | */ | 1791 | */ |
1702 | if (iter->head >= rb_page_size(iter->head_page)) { | 1792 | if (iter->head >= rb_page_size(iter->head_page)) { |
1703 | BUG_ON(iter->head_page == cpu_buffer->commit_page); | 1793 | if (RB_WARN_ON(buffer, |
1794 | iter->head_page == cpu_buffer->commit_page)) | ||
1795 | return; | ||
1704 | rb_inc_iter(iter); | 1796 | rb_inc_iter(iter); |
1705 | return; | 1797 | return; |
1706 | } | 1798 | } |
@@ -1713,8 +1805,10 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1713 | * This should not be called to advance the header if we are | 1805 | * This should not be called to advance the header if we are |
1714 | * at the tail of the buffer. | 1806 | * at the tail of the buffer. |
1715 | */ | 1807 | */ |
1716 | BUG_ON((iter->head_page == cpu_buffer->commit_page) && | 1808 | if (RB_WARN_ON(cpu_buffer, |
1717 | (iter->head + length > rb_commit_index(cpu_buffer))); | 1809 | (iter->head_page == cpu_buffer->commit_page) && |
1810 | (iter->head + length > rb_commit_index(cpu_buffer)))) | ||
1811 | return; | ||
1718 | 1812 | ||
1719 | rb_update_iter_read_stamp(iter, event); | 1813 | rb_update_iter_read_stamp(iter, event); |
1720 | 1814 | ||
@@ -1726,17 +1820,8 @@ static void rb_advance_iter(struct ring_buffer_iter *iter) | |||
1726 | rb_advance_iter(iter); | 1820 | rb_advance_iter(iter); |
1727 | } | 1821 | } |
1728 | 1822 | ||
1729 | /** | 1823 | static struct ring_buffer_event * |
1730 | * ring_buffer_peek - peek at the next event to be read | 1824 | rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) |
1731 | * @buffer: The ring buffer to read | ||
1732 | * @cpu: The cpu to peek at | ||
1733 | * @ts: The timestamp counter of this event. | ||
1734 | * | ||
1735 | * This will return the event that will be read next, but does | ||
1736 | * not consume the data. | ||
1737 | */ | ||
1738 | struct ring_buffer_event * | ||
1739 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | ||
1740 | { | 1825 | { |
1741 | struct ring_buffer_per_cpu *cpu_buffer; | 1826 | struct ring_buffer_per_cpu *cpu_buffer; |
1742 | struct ring_buffer_event *event; | 1827 | struct ring_buffer_event *event; |
@@ -1757,10 +1842,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1757 | * can have. Nesting interrupts 10 deep is clearly | 1842 | * can have. Nesting interrupts 10 deep is clearly |
1758 | * an anomaly. | 1843 | * an anomaly. |
1759 | */ | 1844 | */ |
1760 | if (unlikely(++nr_loops > 10)) { | 1845 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1761 | RB_WARN_ON(cpu_buffer, 1); | ||
1762 | return NULL; | 1846 | return NULL; |
1763 | } | ||
1764 | 1847 | ||
1765 | reader = rb_get_reader_page(cpu_buffer); | 1848 | reader = rb_get_reader_page(cpu_buffer); |
1766 | if (!reader) | 1849 | if (!reader) |
@@ -1798,16 +1881,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | |||
1798 | return NULL; | 1881 | return NULL; |
1799 | } | 1882 | } |
1800 | 1883 | ||
1801 | /** | 1884 | static struct ring_buffer_event * |
1802 | * ring_buffer_iter_peek - peek at the next event to be read | 1885 | rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) |
1803 | * @iter: The ring buffer iterator | ||
1804 | * @ts: The timestamp counter of this event. | ||
1805 | * | ||
1806 | * This will return the event that will be read next, but does | ||
1807 | * not increment the iterator. | ||
1808 | */ | ||
1809 | struct ring_buffer_event * | ||
1810 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | ||
1811 | { | 1886 | { |
1812 | struct ring_buffer *buffer; | 1887 | struct ring_buffer *buffer; |
1813 | struct ring_buffer_per_cpu *cpu_buffer; | 1888 | struct ring_buffer_per_cpu *cpu_buffer; |
@@ -1829,10 +1904,8 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1829 | * can have. Nesting interrupts 10 deep is clearly | 1904 | * can have. Nesting interrupts 10 deep is clearly |
1830 | * an anomaly. | 1905 | * an anomaly. |
1831 | */ | 1906 | */ |
1832 | if (unlikely(++nr_loops > 10)) { | 1907 | if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10)) |
1833 | RB_WARN_ON(cpu_buffer, 1); | ||
1834 | return NULL; | 1908 | return NULL; |
1835 | } | ||
1836 | 1909 | ||
1837 | if (rb_per_cpu_empty(cpu_buffer)) | 1910 | if (rb_per_cpu_empty(cpu_buffer)) |
1838 | return NULL; | 1911 | return NULL; |
@@ -1869,6 +1942,51 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1869 | } | 1942 | } |
1870 | 1943 | ||
1871 | /** | 1944 | /** |
1945 | * ring_buffer_peek - peek at the next event to be read | ||
1946 | * @buffer: The ring buffer to read | ||
1947 | * @cpu: The cpu to peek at | ||
1948 | * @ts: The timestamp counter of this event. | ||
1949 | * | ||
1950 | * This will return the event that will be read next, but does | ||
1951 | * not consume the data. | ||
1952 | */ | ||
1953 | struct ring_buffer_event * | ||
1954 | ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts) | ||
1955 | { | ||
1956 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
1957 | struct ring_buffer_event *event; | ||
1958 | unsigned long flags; | ||
1959 | |||
1960 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1961 | event = rb_buffer_peek(buffer, cpu, ts); | ||
1962 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1963 | |||
1964 | return event; | ||
1965 | } | ||
1966 | |||
1967 | /** | ||
1968 | * ring_buffer_iter_peek - peek at the next event to be read | ||
1969 | * @iter: The ring buffer iterator | ||
1970 | * @ts: The timestamp counter of this event. | ||
1971 | * | ||
1972 | * This will return the event that will be read next, but does | ||
1973 | * not increment the iterator. | ||
1974 | */ | ||
1975 | struct ring_buffer_event * | ||
1976 | ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | ||
1977 | { | ||
1978 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
1979 | struct ring_buffer_event *event; | ||
1980 | unsigned long flags; | ||
1981 | |||
1982 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
1983 | event = rb_iter_peek(iter, ts); | ||
1984 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1985 | |||
1986 | return event; | ||
1987 | } | ||
1988 | |||
1989 | /** | ||
1872 | * ring_buffer_consume - return an event and consume it | 1990 | * ring_buffer_consume - return an event and consume it |
1873 | * @buffer: The ring buffer to get the next event from | 1991 | * @buffer: The ring buffer to get the next event from |
1874 | * | 1992 | * |
@@ -1879,19 +1997,24 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
1879 | struct ring_buffer_event * | 1997 | struct ring_buffer_event * |
1880 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) | 1998 | ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts) |
1881 | { | 1999 | { |
1882 | struct ring_buffer_per_cpu *cpu_buffer; | 2000 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
1883 | struct ring_buffer_event *event; | 2001 | struct ring_buffer_event *event; |
2002 | unsigned long flags; | ||
1884 | 2003 | ||
1885 | if (!cpu_isset(cpu, buffer->cpumask)) | 2004 | if (!cpu_isset(cpu, buffer->cpumask)) |
1886 | return NULL; | 2005 | return NULL; |
1887 | 2006 | ||
1888 | event = ring_buffer_peek(buffer, cpu, ts); | 2007 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2008 | |||
2009 | event = rb_buffer_peek(buffer, cpu, ts); | ||
1889 | if (!event) | 2010 | if (!event) |
1890 | return NULL; | 2011 | goto out; |
1891 | 2012 | ||
1892 | cpu_buffer = buffer->buffers[cpu]; | ||
1893 | rb_advance_reader(cpu_buffer); | 2013 | rb_advance_reader(cpu_buffer); |
1894 | 2014 | ||
2015 | out: | ||
2016 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2017 | |||
1895 | return event; | 2018 | return event; |
1896 | } | 2019 | } |
1897 | 2020 | ||
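ring_buffer_consume() now holds reader_lock across both the peek and rb_advance_reader(), so seeing an event and consuming it is one atomic step with respect to other readers; previously it called the public peek and advanced with no lock held. The discipline reduced to a compilable sketch, with a mutex and an integer cursor standing in for the real reader state (names are illustrative):

    #include <pthread.h>

    struct rb_cpu {
        pthread_mutex_t reader_lock;
        int next;       /* stand-in for the reader position */
        int committed;  /* stand-in for the number of committed events */
    };

    /* rb_buffer_peek() analog: the caller must hold reader_lock. */
    static int peek_locked(struct rb_cpu *b)
    {
        return b->next < b->committed;
    }

    /* Returns 1 if an event was consumed, 0 if the buffer was empty. */
    int consume(struct rb_cpu *b)
    {
        int got = 0;

        pthread_mutex_lock(&b->reader_lock);
        if (peek_locked(b)) {  /* peek ... */
            b->next++;         /* ... and advance, under the same lock */
            got = 1;
        }
        pthread_mutex_unlock(&b->reader_lock);
        return got;
    }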
@@ -1928,9 +2051,11 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu) | |||
1928 | atomic_inc(&cpu_buffer->record_disabled); | 2051 | atomic_inc(&cpu_buffer->record_disabled); |
1929 | synchronize_sched(); | 2052 | synchronize_sched(); |
1930 | 2053 | ||
1931 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 2054 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
1932 | ring_buffer_iter_reset(iter); | 2055 | __raw_spin_lock(&cpu_buffer->lock); |
1933 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 2056 | rb_iter_reset(iter); |
2057 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2058 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1934 | 2059 | ||
1935 | return iter; | 2060 | return iter; |
1936 | } | 2061 | } |
@@ -1962,12 +2087,17 @@ struct ring_buffer_event * | |||
1962 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) | 2087 | ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) |
1963 | { | 2088 | { |
1964 | struct ring_buffer_event *event; | 2089 | struct ring_buffer_event *event; |
2090 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | ||
2091 | unsigned long flags; | ||
1965 | 2092 | ||
1966 | event = ring_buffer_iter_peek(iter, ts); | 2093 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2094 | event = rb_iter_peek(iter, ts); | ||
1967 | if (!event) | 2095 | if (!event) |
1968 | return NULL; | 2096 | goto out; |
1969 | 2097 | ||
1970 | rb_advance_iter(iter); | 2098 | rb_advance_iter(iter); |
2099 | out: | ||
2100 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
1971 | 2101 | ||
1972 | return event; | 2102 | return event; |
1973 | } | 2103 | } |
@@ -1987,7 +2117,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
1987 | cpu_buffer->head_page | 2117 | cpu_buffer->head_page |
1988 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); | 2118 | = list_entry(cpu_buffer->pages.next, struct buffer_page, list); |
1989 | local_set(&cpu_buffer->head_page->write, 0); | 2119 | local_set(&cpu_buffer->head_page->write, 0); |
1990 | local_set(&cpu_buffer->head_page->commit, 0); | 2120 | local_set(&cpu_buffer->head_page->page->commit, 0); |
1991 | 2121 | ||
1992 | cpu_buffer->head_page->read = 0; | 2122 | cpu_buffer->head_page->read = 0; |
1993 | 2123 | ||
@@ -1996,7 +2126,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) | |||
1996 | 2126 | ||
1997 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); | 2127 | INIT_LIST_HEAD(&cpu_buffer->reader_page->list); |
1998 | local_set(&cpu_buffer->reader_page->write, 0); | 2128 | local_set(&cpu_buffer->reader_page->write, 0); |
1999 | local_set(&cpu_buffer->reader_page->commit, 0); | 2129 | local_set(&cpu_buffer->reader_page->page->commit, 0); |
2000 | cpu_buffer->reader_page->read = 0; | 2130 | cpu_buffer->reader_page->read = 0; |
2001 | 2131 | ||
2002 | cpu_buffer->overrun = 0; | 2132 | cpu_buffer->overrun = 0; |
@@ -2016,11 +2146,15 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) | |||
2016 | if (!cpu_isset(cpu, buffer->cpumask)) | 2146 | if (!cpu_isset(cpu, buffer->cpumask)) |
2017 | return; | 2147 | return; |
2018 | 2148 | ||
2019 | spin_lock_irqsave(&cpu_buffer->lock, flags); | 2149 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2150 | |||
2151 | __raw_spin_lock(&cpu_buffer->lock); | ||
2020 | 2152 | ||
2021 | rb_reset_cpu(cpu_buffer); | 2153 | rb_reset_cpu(cpu_buffer); |
2022 | 2154 | ||
2023 | spin_unlock_irqrestore(&cpu_buffer->lock, flags); | 2155 | __raw_spin_unlock(&cpu_buffer->lock); |
2156 | |||
2157 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2024 | } | 2158 | } |
2025 | 2159 | ||
2026 | /** | 2160 | /** |
@@ -2090,8 +2224,7 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2090 | return -EINVAL; | 2224 | return -EINVAL; |
2091 | 2225 | ||
2092 | /* At least make sure the two buffers are somewhat the same */ | 2226 | /* At least make sure the two buffers are somewhat the same */ |
2093 | if (buffer_a->size != buffer_b->size || | 2227 | if (buffer_a->pages != buffer_b->pages) |
2094 | buffer_a->pages != buffer_b->pages) | ||
2095 | return -EINVAL; | 2228 | return -EINVAL; |
2096 | 2229 | ||
2097 | cpu_buffer_a = buffer_a->buffers[cpu]; | 2230 | cpu_buffer_a = buffer_a->buffers[cpu]; |
@@ -2118,16 +2251,178 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2118 | return 0; | 2251 | return 0; |
2119 | } | 2252 | } |
2120 | 2253 | ||
2254 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | ||
2255 | struct buffer_data_page *bpage) | ||
2256 | { | ||
2257 | struct ring_buffer_event *event; | ||
2258 | unsigned long head; | ||
2259 | |||
2260 | __raw_spin_lock(&cpu_buffer->lock); | ||
2261 | for (head = 0; head < local_read(&bpage->commit); | ||
2262 | head += rb_event_length(event)) { | ||
2263 | |||
2264 | event = __rb_data_page_index(bpage, head); | ||
2265 | if (RB_WARN_ON(cpu_buffer, rb_null_event(event))) | ||
2266 | return; | ||
2267 | /* Only count data entries */ | ||
2268 | if (event->type != RINGBUF_TYPE_DATA) | ||
2269 | continue; | ||
2270 | cpu_buffer->entries--; | ||
2271 | } | ||
2272 | __raw_spin_unlock(&cpu_buffer->lock); | ||
2273 | } | ||
2274 | |||
2275 | /** | ||
2276 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | ||
2277 | * @buffer: the buffer to allocate for. | ||
2278 | * | ||
2279 | * This function is used in conjunction with ring_buffer_read_page. | ||
2280 | * When reading a full page from the ring buffer, these functions | ||
2281 | * can be used to speed up the process. The calling function should | ||
2282 | * allocate a few pages first with this function. Then when it | ||
2283 | * needs to get pages from the ring buffer, it passes the result | ||
2284 | * of this function into ring_buffer_read_page, which will swap | ||
2285 | * the page that was allocated with the read page of the buffer. | ||
2286 | * | ||
2287 | * Returns: | ||
2288 | * The page allocated, or NULL on error. | ||
2289 | */ | ||
2290 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | ||
2291 | { | ||
2292 | unsigned long addr; | ||
2293 | struct buffer_data_page *bpage; | ||
2294 | |||
2295 | addr = __get_free_page(GFP_KERNEL); | ||
2296 | if (!addr) | ||
2297 | return NULL; | ||
2298 | |||
2299 | bpage = (void *)addr; | ||
2300 | |||
2301 | return bpage; | ||
2302 | } | ||
2303 | |||
2304 | /** | ||
2305 | * ring_buffer_free_read_page - free an allocated read page | ||
2306 | * @buffer: the buffer the page was allocated for | ||
2307 | * @data: the page to free | ||
2308 | * | ||
2309 | * Free a page allocated from ring_buffer_alloc_read_page. | ||
2310 | */ | ||
2311 | void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | ||
2312 | { | ||
2313 | free_page((unsigned long)data); | ||
2314 | } | ||
2315 | |||
2316 | /** | ||
2317 | * ring_buffer_read_page - extract a page from the ring buffer | ||
2318 | * @buffer: buffer to extract from | ||
2319 | * @data_page: the page to use allocated from ring_buffer_alloc_read_page | ||
2320 | * @cpu: the cpu of the buffer to extract | ||
2321 | * @full: should the extraction only happen when the page is full. | ||
2322 | * | ||
2323 | * This function will pull out a page from the ring buffer and consume it. | ||
2324 | * @data_page must be the address of the variable that was returned | ||
2325 | * from ring_buffer_alloc_read_page. This is because the page might be used | ||
2326 | * to swap with a page in the ring buffer. | ||
2327 | * | ||
2328 | * for example: | ||
2329 | * rpage = ring_buffer_alloc_read_page(buffer); | ||
2330 | * if (!rpage) | ||
2331 | * return error; | ||
2332 | * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); | ||
2333 | * if (ret) | ||
2334 | * process_page(rpage); | ||
2335 | * | ||
2336 | * When @full is set, the function will not return true unless | ||
2337 | * the writer is off the reader page. | ||
2338 | * | ||
2339 | * Note: it is up to the calling functions to handle sleeps and wakeups. | ||
2340 | * The ring buffer can be used anywhere in the kernel and cannot | ||
2341 | * blindly call wake_up. The layer that uses the ring buffer must be | ||
2342 | * responsible for that. | ||
2343 | * | ||
2344 | * Returns: | ||
2345 | * 1 if data has been transferred | ||
2346 | * 0 if no data has been transferred. | ||
2347 | */ | ||
2348 | int ring_buffer_read_page(struct ring_buffer *buffer, | ||
2349 | void **data_page, int cpu, int full) | ||
2350 | { | ||
2351 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | ||
2352 | struct ring_buffer_event *event; | ||
2353 | struct buffer_data_page *bpage; | ||
2354 | unsigned long flags; | ||
2355 | int ret = 0; | ||
2356 | |||
2357 | if (!data_page) | ||
2358 | return 0; | ||
2359 | |||
2360 | bpage = *data_page; | ||
2361 | if (!bpage) | ||
2362 | return 0; | ||
2363 | |||
2364 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
2365 | |||
2366 | /* | ||
2367 | * rb_buffer_peek will get the next ring buffer page if | ||
2368 | * the current reader page is empty. | ||
2369 | */ | ||
2370 | event = rb_buffer_peek(buffer, cpu, NULL); | ||
2371 | if (!event) | ||
2372 | goto out; | ||
2373 | |||
2374 | /* check for data */ | ||
2375 | if (!local_read(&cpu_buffer->reader_page->page->commit)) | ||
2376 | goto out; | ||
2377 | /* | ||
2378 | * If the writer is already off of the read page, then simply | ||
2379 | * switch the read page with the given page. Otherwise | ||
2380 | * we need to copy the data from the reader to the writer. | ||
2381 | */ | ||
2382 | if (cpu_buffer->reader_page == cpu_buffer->commit_page) { | ||
2383 | unsigned int read = cpu_buffer->reader_page->read; | ||
2384 | |||
2385 | if (full) | ||
2386 | goto out; | ||
2387 | /* The writer is still on the reader page, we must copy */ | ||
2388 | bpage = cpu_buffer->reader_page->page; | ||
2389 | memcpy(bpage->data, | ||
2390 | cpu_buffer->reader_page->page->data + read, | ||
2391 | local_read(&bpage->commit) - read); | ||
2392 | |||
2393 | /* consume what was read */ | ||
2394 | cpu_buffer->reader_page += read; | ||
2395 | |||
2396 | } else { | ||
2397 | /* swap the pages */ | ||
2398 | rb_init_page(bpage); | ||
2399 | bpage = cpu_buffer->reader_page->page; | ||
2400 | cpu_buffer->reader_page->page = *data_page; | ||
2401 | cpu_buffer->reader_page->read = 0; | ||
2402 | *data_page = bpage; | ||
2403 | } | ||
2404 | ret = 1; | ||
2405 | |||
2406 | /* update the entry counter */ | ||
2407 | rb_remove_entries(cpu_buffer, bpage); | ||
2408 | out: | ||
2409 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
2410 | |||
2411 | return ret; | ||
2412 | } | ||
2413 | |||
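Putting the three new exports together: a consumer allocates one spare page up front, then repeatedly trades it for the reader page. A hedged usage sketch in kernel context, expanding the example from the comment above (drain_cpu() and process_page() are hypothetical placeholders, not part of the API):

    /* Hypothetical consumer: drain whatever CPU `cpu` has buffered. */
    static void drain_cpu(struct ring_buffer *buffer, int cpu)
    {
        void *rpage = ring_buffer_alloc_read_page(buffer);

        if (!rpage)
            return;

        /* full == 0: accept a partially filled page as well */
        while (ring_buffer_read_page(buffer, &rpage, cpu, 0))
            process_page(rpage);  /* rpage may now be a swapped-in page */

        ring_buffer_free_read_page(buffer, rpage);
    }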
2121 | static ssize_t | 2414 | static ssize_t |
2122 | rb_simple_read(struct file *filp, char __user *ubuf, | 2415 | rb_simple_read(struct file *filp, char __user *ubuf, |
2123 | size_t cnt, loff_t *ppos) | 2416 | size_t cnt, loff_t *ppos) |
2124 | { | 2417 | { |
2125 | int *p = filp->private_data; | 2418 | long *p = filp->private_data; |
2126 | char buf[64]; | 2419 | char buf[64]; |
2127 | int r; | 2420 | int r; |
2128 | 2421 | ||
2129 | /* !ring_buffers_off == tracing_on */ | 2422 | if (test_bit(RB_BUFFERS_DISABLED_BIT, p)) |
2130 | r = sprintf(buf, "%d\n", !*p); | 2423 | r = sprintf(buf, "permanently disabled\n"); |
2424 | else | ||
2425 | r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p)); | ||
2131 | 2426 | ||
2132 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2427 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2133 | } | 2428 | } |
@@ -2136,7 +2431,7 @@ static ssize_t | |||
2136 | rb_simple_write(struct file *filp, const char __user *ubuf, | 2431 | rb_simple_write(struct file *filp, const char __user *ubuf, |
2137 | size_t cnt, loff_t *ppos) | 2432 | size_t cnt, loff_t *ppos) |
2138 | { | 2433 | { |
2139 | int *p = filp->private_data; | 2434 | long *p = filp->private_data; |
2140 | char buf[64]; | 2435 | char buf[64]; |
2141 | long val; | 2436 | long val; |
2142 | int ret; | 2437 | int ret; |
@@ -2153,8 +2448,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf, | |||
2153 | if (ret < 0) | 2448 | if (ret < 0) |
2154 | return ret; | 2449 | return ret; |
2155 | 2450 | ||
2156 | /* !ring_buffers_off == tracing_on */ | 2451 | if (val) |
2157 | *p = !val; | 2452 | set_bit(RB_BUFFERS_ON_BIT, p); |
2453 | else | ||
2454 | clear_bit(RB_BUFFERS_ON_BIT, p); | ||
2158 | 2455 | ||
2159 | (*ppos)++; | 2456 | (*ppos)++; |
2160 | 2457 | ||
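rb_simple_read() and rb_simple_write() now treat their private_data as a long used as a bitmask rather than a plain on/off int, which is what lets tracing_off_permanent() set a second, sticky bit that the debugfs file can report but never clear. The flag word these handlers operate on is defined near the top of ring_buffer.c, roughly as:

    enum {
        RB_BUFFERS_ON_BIT       = 0,  /* toggled via debugfs tracing_on */
        RB_BUFFERS_DISABLED_BIT = 1,  /* set once, never cleared */
    };

    enum {
        RB_BUFFERS_ON       = 1 << RB_BUFFERS_ON_BIT,
        RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT,
    };

    static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;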
@@ -2176,7 +2473,7 @@ static __init int rb_init_debugfs(void) | |||
2176 | d_tracer = tracing_init_dentry(); | 2473 | d_tracer = tracing_init_dentry(); |
2177 | 2474 | ||
2178 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, | 2475 | entry = debugfs_create_file("tracing_on", 0644, d_tracer, |
2179 | &ring_buffers_off, &rb_simple_fops); | 2476 | &ring_buffer_flags, &rb_simple_fops); |
2180 | if (!entry) | 2477 | if (!entry) |
2181 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); | 2478 | pr_warning("Could not create debugfs 'tracing_on' entry\n"); |
2182 | 2479 | ||
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d86e3252f300..f4bb3800318b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/gfp.h> | 30 | #include <linux/gfp.h> |
31 | #include <linux/fs.h> | 31 | #include <linux/fs.h> |
32 | #include <linux/kprobes.h> | 32 | #include <linux/kprobes.h> |
33 | #include <linux/seq_file.h> | ||
33 | #include <linux/writeback.h> | 34 | #include <linux/writeback.h> |
34 | 35 | ||
35 | #include <linux/stacktrace.h> | 36 | #include <linux/stacktrace.h> |
@@ -43,6 +44,38 @@ | |||
43 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; | 44 | unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; |
44 | unsigned long __read_mostly tracing_thresh; | 45 | unsigned long __read_mostly tracing_thresh; |
45 | 46 | ||
47 | /* | ||
48 | * We need to change this state when a selftest is running. | ||
49 | * A selftest will lurk into the ring-buffer to count the | ||
50 | * entries inserted during the selftest although some concurrent | ||
51 | * insertions into the ring-buffer such as ftrace_printk could occurred | ||
52 | * at the same time, giving false positive or negative results. | ||
53 | */ | ||
54 | static bool __read_mostly tracing_selftest_running; | ||
55 | |||
56 | /* For tracers that don't implement custom flags */ | ||
57 | static struct tracer_opt dummy_tracer_opt[] = { | ||
58 | { } | ||
59 | }; | ||
60 | |||
61 | static struct tracer_flags dummy_tracer_flags = { | ||
62 | .val = 0, | ||
63 | .opts = dummy_tracer_opt | ||
64 | }; | ||
65 | |||
66 | static int dummy_set_flag(u32 old_flags, u32 bit, int set) | ||
67 | { | ||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Kill all tracing for good (never come back). | ||
73 | * It is initialized to 1 and will be cleared to zero if the | ||
74 | * initialization of the tracer is successful; that is the only | ||
75 | * place that sets it back to zero. | ||
76 | */ | ||
77 | int tracing_disabled = 1; | ||
78 | |||
46 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 79 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); |
47 | 80 | ||
48 | static inline void ftrace_disable_cpu(void) | 81 | static inline void ftrace_disable_cpu(void) |
@@ -62,7 +95,36 @@ static cpumask_t __read_mostly tracing_buffer_mask; | |||
62 | #define for_each_tracing_cpu(cpu) \ | 95 | #define for_each_tracing_cpu(cpu) \ |
63 | for_each_cpu_mask(cpu, tracing_buffer_mask) | 96 | for_each_cpu_mask(cpu, tracing_buffer_mask) |
64 | 97 | ||
65 | static int tracing_disabled = 1; | 98 | /* |
99 | * ftrace_dump_on_oops - variable to dump ftrace buffer on oops | ||
100 | * | ||
101 | * If there is an oops (or kernel panic) and the ftrace_dump_on_oops | ||
102 | * is set, then ftrace_dump is called. This will output the contents | ||
103 | * of the ftrace buffers to the console. This is very useful for | ||
104 | * capturing traces that lead to crashes and outputting them to a | ||
105 | * serial console. | ||
106 | * | ||
107 | * It is off by default, but you can enable it either by specifying | ||
108 | * "ftrace_dump_on_oops" on the kernel command line, or by setting | ||
109 | * /proc/sys/kernel/ftrace_dump_on_oops to true. | ||
110 | */ | ||
111 | int ftrace_dump_on_oops; | ||
112 | |||
113 | static int tracing_set_tracer(char *buf); | ||
114 | |||
115 | static int __init set_ftrace(char *str) | ||
116 | { | ||
117 | tracing_set_tracer(str); | ||
118 | return 1; | ||
119 | } | ||
120 | __setup("ftrace", set_ftrace); | ||
121 | |||
122 | static int __init set_ftrace_dump_on_oops(char *str) | ||
123 | { | ||
124 | ftrace_dump_on_oops = 1; | ||
125 | return 1; | ||
126 | } | ||
127 | __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | ||
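Both __setup() hooks run during early command-line parsing, before any initcalls. For example, booting with the following (assuming the function tracer is compiled in):

    ftrace=function ftrace_dump_on_oops

selects the function tracer before userspace even starts and arranges for the trace buffers to be dumped to the console if the kernel oopses.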
66 | 128 | ||
67 | long | 129 | long |
68 | ns2usecs(cycle_t nsec) | 130 | ns2usecs(cycle_t nsec) |
@@ -112,6 +174,19 @@ static DEFINE_PER_CPU(struct trace_array_cpu, max_data); | |||
112 | /* tracer_enabled is used to toggle activation of a tracer */ | 174 | /* tracer_enabled is used to toggle activation of a tracer */ |
113 | static int tracer_enabled = 1; | 175 | static int tracer_enabled = 1; |
114 | 176 | ||
177 | /** | ||
178 | * tracing_is_enabled - return tracer_enabled status | ||
179 | * | ||
180 | * This function is used by other tracers to know the status | ||
181 | * of the tracer_enabled flag. Tracers may use this function | ||
182 | * to know whether they should enable their features when starting | ||
183 | * up. See irqsoff tracer for an example (start_irqsoff_tracer). | ||
184 | */ | ||
185 | int tracing_is_enabled(void) | ||
186 | { | ||
187 | return tracer_enabled; | ||
188 | } | ||
189 | |||
115 | /* function tracing enabled */ | 190 | /* function tracing enabled */ |
116 | int ftrace_function_enabled; | 191 | int ftrace_function_enabled; |
117 | 192 | ||
@@ -153,8 +228,9 @@ static DEFINE_MUTEX(trace_types_lock); | |||
153 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ | 228 | /* trace_wait is a waitqueue for tasks blocked on trace_poll */ |
154 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | 229 | static DECLARE_WAIT_QUEUE_HEAD(trace_wait); |
155 | 230 | ||
156 | /* trace_flags holds iter_ctrl options */ | 231 | /* trace_flags holds trace_options default values */ |
157 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; | 232 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
233 | TRACE_ITER_ANNOTATE; | ||
158 | 234 | ||
159 | /** | 235 | /** |
160 | * trace_wake_up - wake up tasks waiting for trace input | 236 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -193,13 +269,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs) | |||
193 | return nsecs / 1000; | 269 | return nsecs / 1000; |
194 | } | 270 | } |
195 | 271 | ||
196 | /* | ||
197 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
198 | * control the output of kernel symbols. | ||
199 | */ | ||
200 | #define TRACE_ITER_SYM_MASK \ | ||
201 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
202 | |||
203 | /* These must match the bit positions in trace_iterator_flags */ | 272 | /* These must match the bit positions in trace_iterator_flags */ |
204 | static const char *trace_options[] = { | 273 | static const char *trace_options[] = { |
205 | "print-parent", | 274 | "print-parent", |
@@ -213,6 +282,12 @@ static const char *trace_options[] = { | |||
213 | "stacktrace", | 282 | "stacktrace", |
214 | "sched-tree", | 283 | "sched-tree", |
215 | "ftrace_printk", | 284 | "ftrace_printk", |
285 | "ftrace_preempt", | ||
286 | "branch", | ||
287 | "annotate", | ||
288 | "userstacktrace", | ||
289 | "sym-userobj", | ||
290 | "printk-msg-only", | ||
216 | NULL | 291 | NULL |
217 | }; | 292 | }; |
218 | 293 | ||
@@ -246,7 +321,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
246 | 321 | ||
247 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); | 322 | memcpy(data->comm, tsk->comm, TASK_COMM_LEN); |
248 | data->pid = tsk->pid; | 323 | data->pid = tsk->pid; |
249 | data->uid = tsk->uid; | 324 | data->uid = task_uid(tsk); |
250 | data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; | 325 | data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; |
251 | data->policy = tsk->policy; | 326 | data->policy = tsk->policy; |
252 | data->rt_priority = tsk->rt_priority; | 327 | data->rt_priority = tsk->rt_priority; |
@@ -359,6 +434,28 @@ trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) | |||
359 | return trace_seq_putmem(s, hex, j); | 434 | return trace_seq_putmem(s, hex, j); |
360 | } | 435 | } |
361 | 436 | ||
437 | static int | ||
438 | trace_seq_path(struct trace_seq *s, struct path *path) | ||
439 | { | ||
440 | unsigned char *p; | ||
441 | |||
442 | if (s->len >= (PAGE_SIZE - 1)) | ||
443 | return 0; | ||
444 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
445 | if (!IS_ERR(p)) { | ||
446 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
447 | if (p) { | ||
448 | s->len = p - s->buffer; | ||
449 | return 1; | ||
450 | } | ||
451 | } else { | ||
452 | s->buffer[s->len++] = '?'; | ||
453 | return 1; | ||
454 | } | ||
455 | |||
456 | return 0; | ||
457 | } | ||
458 | |||
362 | static void | 459 | static void |
363 | trace_seq_reset(struct trace_seq *s) | 460 | trace_seq_reset(struct trace_seq *s) |
364 | { | 461 | { |
@@ -470,7 +567,17 @@ int register_tracer(struct tracer *type) | |||
470 | return -1; | 567 | return -1; |
471 | } | 568 | } |
472 | 569 | ||
570 | /* | ||
571 | * When this gets called we hold the BKL which means that | ||
572 | * preemption is disabled. Various trace selftests however | ||
573 | * need to disable and enable preemption for successful tests. | ||
574 | * So we drop the BKL here and grab it after the tests again. | ||
575 | */ | ||
576 | unlock_kernel(); | ||
473 | mutex_lock(&trace_types_lock); | 577 | mutex_lock(&trace_types_lock); |
578 | |||
579 | tracing_selftest_running = true; | ||
580 | |||
474 | for (t = trace_types; t; t = t->next) { | 581 | for (t = trace_types; t; t = t->next) { |
475 | if (strcmp(type->name, t->name) == 0) { | 582 | if (strcmp(type->name, t->name) == 0) { |
476 | /* already found */ | 583 | /* already found */ |
@@ -481,12 +588,20 @@ int register_tracer(struct tracer *type) | |||
481 | } | 588 | } |
482 | } | 589 | } |
483 | 590 | ||
591 | if (!type->set_flag) | ||
592 | type->set_flag = &dummy_set_flag; | ||
593 | if (!type->flags) | ||
594 | type->flags = &dummy_tracer_flags; | ||
595 | else | ||
596 | if (!type->flags->opts) | ||
597 | type->flags->opts = dummy_tracer_opt; | ||
598 | |||
484 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 599 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
485 | if (type->selftest) { | 600 | if (type->selftest) { |
486 | struct tracer *saved_tracer = current_trace; | 601 | struct tracer *saved_tracer = current_trace; |
487 | struct trace_array *tr = &global_trace; | 602 | struct trace_array *tr = &global_trace; |
488 | int saved_ctrl = tr->ctrl; | ||
489 | int i; | 603 | int i; |
604 | |||
490 | /* | 605 | /* |
491 | * Run a selftest on this tracer. | 606 | * Run a selftest on this tracer. |
492 | * Here we reset the trace buffer, and set the current | 607 | * Here we reset the trace buffer, and set the current |
@@ -494,25 +609,23 @@ int register_tracer(struct tracer *type) | |||
494 | * internal tracing to verify that everything is in order. | 609 | * internal tracing to verify that everything is in order. |
495 | * If we fail, we do not register this tracer. | 610 | * If we fail, we do not register this tracer. |
496 | */ | 611 | */ |
497 | for_each_tracing_cpu(i) { | 612 | for_each_tracing_cpu(i) |
498 | tracing_reset(tr, i); | 613 | tracing_reset(tr, i); |
499 | } | 614 | |
500 | current_trace = type; | 615 | current_trace = type; |
501 | tr->ctrl = 0; | ||
502 | /* the test is responsible for initializing and enabling */ | 616 | /* the test is responsible for initializing and enabling */ |
503 | pr_info("Testing tracer %s: ", type->name); | 617 | pr_info("Testing tracer %s: ", type->name); |
504 | ret = type->selftest(type, tr); | 618 | ret = type->selftest(type, tr); |
505 | /* the test is responsible for resetting too */ | 619 | /* the test is responsible for resetting too */ |
506 | current_trace = saved_tracer; | 620 | current_trace = saved_tracer; |
507 | tr->ctrl = saved_ctrl; | ||
508 | if (ret) { | 621 | if (ret) { |
509 | printk(KERN_CONT "FAILED!\n"); | 622 | printk(KERN_CONT "FAILED!\n"); |
510 | goto out; | 623 | goto out; |
511 | } | 624 | } |
512 | /* Only reset on passing, to avoid touching corrupted buffers */ | 625 | /* Only reset on passing, to avoid touching corrupted buffers */ |
513 | for_each_tracing_cpu(i) { | 626 | for_each_tracing_cpu(i) |
514 | tracing_reset(tr, i); | 627 | tracing_reset(tr, i); |
515 | } | 628 | |
516 | printk(KERN_CONT "PASSED\n"); | 629 | printk(KERN_CONT "PASSED\n"); |
517 | } | 630 | } |
518 | #endif | 631 | #endif |
@@ -524,7 +637,9 @@ int register_tracer(struct tracer *type) | |||
524 | max_tracer_type_len = len; | 637 | max_tracer_type_len = len; |
525 | 638 | ||
526 | out: | 639 | out: |
640 | tracing_selftest_running = false; | ||
527 | mutex_unlock(&trace_types_lock); | 641 | mutex_unlock(&trace_types_lock); |
642 | lock_kernel(); | ||
528 | 643 | ||
529 | return ret; | 644 | return ret; |
530 | } | 645 | } |
@@ -564,6 +679,16 @@ void tracing_reset(struct trace_array *tr, int cpu) | |||
564 | ftrace_enable_cpu(); | 679 | ftrace_enable_cpu(); |
565 | } | 680 | } |
566 | 681 | ||
682 | void tracing_reset_online_cpus(struct trace_array *tr) | ||
683 | { | ||
684 | int cpu; | ||
685 | |||
686 | tr->time_start = ftrace_now(tr->cpu); | ||
687 | |||
688 | for_each_online_cpu(cpu) | ||
689 | tracing_reset(tr, cpu); | ||
690 | } | ||
691 | |||
567 | #define SAVED_CMDLINES 128 | 692 | #define SAVED_CMDLINES 128 |
568 | static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | 693 | static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; |
569 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; | 694 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; |
@@ -581,6 +706,91 @@ static void trace_init_cmdlines(void) | |||
581 | cmdline_idx = 0; | 706 | cmdline_idx = 0; |
582 | } | 707 | } |
583 | 708 | ||
709 | static int trace_stop_count; | ||
710 | static DEFINE_SPINLOCK(tracing_start_lock); | ||
711 | |||
712 | /** | ||
713 | * ftrace_off_permanent - disable all ftrace code permanently | ||
714 | * | ||
715 | * This should only be called when a serious anomaly has | ||
716 | * been detected. This will turn off the function tracing, | ||
717 | * ring buffers, and other tracing utilities. It takes no | ||
718 | * locks and can be called from any context. | ||
719 | */ | ||
720 | void ftrace_off_permanent(void) | ||
721 | { | ||
722 | tracing_disabled = 1; | ||
723 | ftrace_stop(); | ||
724 | tracing_off_permanent(); | ||
725 | } | ||
726 | |||
727 | /** | ||
728 | * tracing_start - quick start of the tracer | ||
729 | * | ||
730 | * If tracing is enabled but was stopped by tracing_stop, | ||
731 | * this will start the tracer back up. | ||
732 | */ | ||
733 | void tracing_start(void) | ||
734 | { | ||
735 | struct ring_buffer *buffer; | ||
736 | unsigned long flags; | ||
737 | |||
738 | if (tracing_disabled) | ||
739 | return; | ||
740 | |||
741 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
742 | if (--trace_stop_count) | ||
743 | goto out; | ||
744 | |||
745 | if (trace_stop_count < 0) { | ||
746 | /* Someone screwed up their debugging */ | ||
747 | WARN_ON_ONCE(1); | ||
748 | trace_stop_count = 0; | ||
749 | goto out; | ||
750 | } | ||
751 | |||
752 | |||
753 | buffer = global_trace.buffer; | ||
754 | if (buffer) | ||
755 | ring_buffer_record_enable(buffer); | ||
756 | |||
757 | buffer = max_tr.buffer; | ||
758 | if (buffer) | ||
759 | ring_buffer_record_enable(buffer); | ||
760 | |||
761 | ftrace_start(); | ||
762 | out: | ||
763 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
764 | } | ||
765 | |||
766 | /** | ||
767 | * tracing_stop - quick stop of the tracer | ||
768 | * | ||
769 | * Light weight way to stop tracing. Use in conjunction with | ||
770 | * tracing_start. | ||
771 | */ | ||
772 | void tracing_stop(void) | ||
773 | { | ||
774 | struct ring_buffer *buffer; | ||
775 | unsigned long flags; | ||
776 | |||
777 | ftrace_stop(); | ||
778 | spin_lock_irqsave(&tracing_start_lock, flags); | ||
779 | if (trace_stop_count++) | ||
780 | goto out; | ||
781 | |||
782 | buffer = global_trace.buffer; | ||
783 | if (buffer) | ||
784 | ring_buffer_record_disable(buffer); | ||
785 | |||
786 | buffer = max_tr.buffer; | ||
787 | if (buffer) | ||
788 | ring_buffer_record_disable(buffer); | ||
789 | |||
790 | out: | ||
791 | spin_unlock_irqrestore(&tracing_start_lock, flags); | ||
792 | } | ||
793 | |||
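tracing_stop() and tracing_start() nest like preempt_disable()/preempt_enable(): a counter under tracing_start_lock ensures that only the outermost stop disables the ring buffers and only the matching final start re-enables them. The counting discipline as a compilable userspace sketch (a mutex stands in for the irq-safe spinlock; unlike the code above, the sketch checks for underflow before bailing out, so the clamp for unbalanced callers is actually reachable):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t start_lock = PTHREAD_MUTEX_INITIALIZER;
    static int stop_count;

    static void sketch_tracing_stop(void)
    {
        pthread_mutex_lock(&start_lock);
        if (stop_count++ == 0)
            printf("disable recording\n");  /* ring_buffer_record_disable() */
        pthread_mutex_unlock(&start_lock);
    }

    static void sketch_tracing_start(void)
    {
        pthread_mutex_lock(&start_lock);
        if (--stop_count < 0) {
            stop_count = 0;                /* unbalanced start: clamp */
        } else if (stop_count == 0) {
            printf("enable recording\n");  /* ring_buffer_record_enable() */
        }
        pthread_mutex_unlock(&start_lock);
    }

    int main(void)
    {
        sketch_tracing_stop();
        sketch_tracing_stop();   /* nested: recording stays disabled */
        sketch_tracing_start();  /* still stopped */
        sketch_tracing_start();  /* outermost: re-enables */
        return 0;
    }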
584 | void trace_stop_cmdline_recording(void); | 794 | void trace_stop_cmdline_recording(void); |
585 | 795 | ||
586 | static void trace_save_cmdline(struct task_struct *tsk) | 796 | static void trace_save_cmdline(struct task_struct *tsk) |
@@ -618,7 +828,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
618 | spin_unlock(&trace_cmdline_lock); | 828 | spin_unlock(&trace_cmdline_lock); |
619 | } | 829 | } |
620 | 830 | ||
621 | static char *trace_find_cmdline(int pid) | 831 | char *trace_find_cmdline(int pid) |
622 | { | 832 | { |
623 | char *cmdline = "<...>"; | 833 | char *cmdline = "<...>"; |
624 | unsigned map; | 834 | unsigned map; |
@@ -655,6 +865,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
655 | 865 | ||
656 | entry->preempt_count = pc & 0xff; | 866 | entry->preempt_count = pc & 0xff; |
657 | entry->pid = (tsk) ? tsk->pid : 0; | 867 | entry->pid = (tsk) ? tsk->pid : 0; |
868 | entry->tgid = (tsk) ? tsk->tgid : 0; | ||
658 | entry->flags = | 869 | entry->flags = |
659 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 870 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT |
660 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | | 871 | (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | |
@@ -691,6 +902,56 @@ trace_function(struct trace_array *tr, struct trace_array_cpu *data, | |||
691 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 902 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
692 | } | 903 | } |
693 | 904 | ||
905 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
906 | static void __trace_graph_entry(struct trace_array *tr, | ||
907 | struct trace_array_cpu *data, | ||
908 | struct ftrace_graph_ent *trace, | ||
909 | unsigned long flags, | ||
910 | int pc) | ||
911 | { | ||
912 | struct ring_buffer_event *event; | ||
913 | struct ftrace_graph_ent_entry *entry; | ||
914 | unsigned long irq_flags; | ||
915 | |||
916 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
917 | return; | ||
918 | |||
919 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
920 | &irq_flags); | ||
921 | if (!event) | ||
922 | return; | ||
923 | entry = ring_buffer_event_data(event); | ||
924 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
925 | entry->ent.type = TRACE_GRAPH_ENT; | ||
926 | entry->graph_ent = *trace; | ||
927 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
928 | } | ||
929 | |||
930 | static void __trace_graph_return(struct trace_array *tr, | ||
931 | struct trace_array_cpu *data, | ||
932 | struct ftrace_graph_ret *trace, | ||
933 | unsigned long flags, | ||
934 | int pc) | ||
935 | { | ||
936 | struct ring_buffer_event *event; | ||
937 | struct ftrace_graph_ret_entry *entry; | ||
938 | unsigned long irq_flags; | ||
939 | |||
940 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | ||
941 | return; | ||
942 | |||
943 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | ||
944 | &irq_flags); | ||
945 | if (!event) | ||
946 | return; | ||
947 | entry = ring_buffer_event_data(event); | ||
948 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
949 | entry->ent.type = TRACE_GRAPH_RET; | ||
950 | entry->ret = *trace; | ||
951 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | ||
952 | } | ||
953 | #endif | ||
954 | |||
694 | void | 955 | void |
695 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, | 956 | ftrace(struct trace_array *tr, struct trace_array_cpu *data, |
696 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 957 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
@@ -742,6 +1003,46 @@ void __trace_stack(struct trace_array *tr, | |||
742 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); | 1003 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); |
743 | } | 1004 | } |
744 | 1005 | ||
1006 | static void ftrace_trace_userstack(struct trace_array *tr, | ||
1007 | struct trace_array_cpu *data, | ||
1008 | unsigned long flags, int pc) | ||
1009 | { | ||
1010 | #ifdef CONFIG_STACKTRACE | ||
1011 | struct ring_buffer_event *event; | ||
1012 | struct userstack_entry *entry; | ||
1013 | struct stack_trace trace; | ||
1014 | unsigned long irq_flags; | ||
1015 | |||
1016 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | ||
1017 | return; | ||
1018 | |||
1019 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
1020 | &irq_flags); | ||
1021 | if (!event) | ||
1022 | return; | ||
1023 | entry = ring_buffer_event_data(event); | ||
1024 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
1025 | entry->ent.type = TRACE_USER_STACK; | ||
1026 | |||
1027 | memset(&entry->caller, 0, sizeof(entry->caller)); | ||
1028 | |||
1029 | trace.nr_entries = 0; | ||
1030 | trace.max_entries = FTRACE_STACK_ENTRIES; | ||
1031 | trace.skip = 0; | ||
1032 | trace.entries = entry->caller; | ||
1033 | |||
1034 | save_stack_trace_user(&trace); | ||
1035 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
1036 | #endif | ||
1037 | } | ||
1038 | |||
1039 | void __trace_userstack(struct trace_array *tr, | ||
1040 | struct trace_array_cpu *data, | ||
1041 | unsigned long flags) | ||
1042 | { | ||
1043 | ftrace_trace_userstack(tr, data, flags, preempt_count()); | ||
1044 | } | ||
1045 | |||
745 | static void | 1046 | static void |
746 | ftrace_trace_special(void *__tr, void *__data, | 1047 | ftrace_trace_special(void *__tr, void *__data, |
747 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | 1048 | unsigned long arg1, unsigned long arg2, unsigned long arg3, |
@@ -765,6 +1066,7 @@ ftrace_trace_special(void *__tr, void *__data, | |||
765 | entry->arg3 = arg3; | 1066 | entry->arg3 = arg3; |
766 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1067 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
767 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); | 1068 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); |
1069 | ftrace_trace_userstack(tr, data, irq_flags, pc); | ||
768 | 1070 | ||
769 | trace_wake_up(); | 1071 | trace_wake_up(); |
770 | } | 1072 | } |
@@ -803,6 +1105,7 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
803 | entry->next_cpu = task_cpu(next); | 1105 | entry->next_cpu = task_cpu(next); |
804 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1106 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
805 | ftrace_trace_stack(tr, data, flags, 5, pc); | 1107 | ftrace_trace_stack(tr, data, flags, 5, pc); |
1108 | ftrace_trace_userstack(tr, data, flags, pc); | ||
806 | } | 1109 | } |
807 | 1110 | ||
808 | void | 1111 | void |
@@ -832,6 +1135,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
832 | entry->next_cpu = task_cpu(wakee); | 1135 | entry->next_cpu = task_cpu(wakee); |
833 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1136 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
834 | ftrace_trace_stack(tr, data, flags, 6, pc); | 1137 | ftrace_trace_stack(tr, data, flags, 6, pc); |
1138 | ftrace_trace_userstack(tr, data, flags, pc); | ||
835 | 1139 | ||
836 | trace_wake_up(); | 1140 | trace_wake_up(); |
837 | } | 1141 | } |
@@ -841,26 +1145,28 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
841 | { | 1145 | { |
842 | struct trace_array *tr = &global_trace; | 1146 | struct trace_array *tr = &global_trace; |
843 | struct trace_array_cpu *data; | 1147 | struct trace_array_cpu *data; |
1148 | unsigned long flags; | ||
844 | int cpu; | 1149 | int cpu; |
845 | int pc; | 1150 | int pc; |
846 | 1151 | ||
847 | if (tracing_disabled || !tr->ctrl) | 1152 | if (tracing_disabled) |
848 | return; | 1153 | return; |
849 | 1154 | ||
850 | pc = preempt_count(); | 1155 | pc = preempt_count(); |
851 | preempt_disable_notrace(); | 1156 | local_irq_save(flags); |
852 | cpu = raw_smp_processor_id(); | 1157 | cpu = raw_smp_processor_id(); |
853 | data = tr->data[cpu]; | 1158 | data = tr->data[cpu]; |
854 | 1159 | ||
855 | if (likely(!atomic_read(&data->disabled))) | 1160 | if (likely(atomic_inc_return(&data->disabled) == 1)) |
856 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); | 1161 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); |
857 | 1162 | ||
858 | preempt_enable_notrace(); | 1163 | atomic_dec(&data->disabled); |
1164 | local_irq_restore(flags); | ||
859 | } | 1165 | } |
860 | 1166 | ||
861 | #ifdef CONFIG_FUNCTION_TRACER | 1167 | #ifdef CONFIG_FUNCTION_TRACER |
862 | static void | 1168 | static void |
863 | function_trace_call(unsigned long ip, unsigned long parent_ip) | 1169 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) |
864 | { | 1170 | { |
865 | struct trace_array *tr = &global_trace; | 1171 | struct trace_array *tr = &global_trace; |
866 | struct trace_array_cpu *data; | 1172 | struct trace_array_cpu *data; |
@@ -873,8 +1179,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
873 | return; | 1179 | return; |
874 | 1180 | ||
875 | pc = preempt_count(); | 1181 | pc = preempt_count(); |
876 | resched = need_resched(); | 1182 | resched = ftrace_preempt_disable(); |
877 | preempt_disable_notrace(); | ||
878 | local_save_flags(flags); | 1183 | local_save_flags(flags); |
879 | cpu = raw_smp_processor_id(); | 1184 | cpu = raw_smp_processor_id(); |
880 | data = tr->data[cpu]; | 1185 | data = tr->data[cpu]; |
@@ -884,12 +1189,97 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) | |||
884 | trace_function(tr, data, ip, parent_ip, flags, pc); | 1189 | trace_function(tr, data, ip, parent_ip, flags, pc); |
885 | 1190 | ||
886 | atomic_dec(&data->disabled); | 1191 | atomic_dec(&data->disabled); |
887 | if (resched) | 1192 | ftrace_preempt_enable(resched); |
888 | preempt_enable_no_resched_notrace(); | ||
889 | else | ||
890 | preempt_enable_notrace(); | ||
891 | } | 1193 | } |
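
ftrace_preempt_disable()/ftrace_preempt_enable() fold the removed need_resched() dance into a reusable pair. A sketch of the helpers as this series defines them in trace.h:

    /* Remember whether a reschedule was already pending before we
     * disabled preemption, so re-enabling cannot recurse into
     * schedule() from inside the function tracer itself. */
    static inline int ftrace_preempt_disable(void)
    {
    	int resched = need_resched();

    	preempt_disable_notrace();
    	return resched;
    }

    static inline void ftrace_preempt_enable(int resched)
    {
    	if (resched)
    		preempt_enable_no_resched_notrace();
    	else
    		preempt_enable_notrace();
    }
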
892 | 1194 | ||
1195 | static void | ||
1196 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
1197 | { | ||
1198 | struct trace_array *tr = &global_trace; | ||
1199 | struct trace_array_cpu *data; | ||
1200 | unsigned long flags; | ||
1201 | long disabled; | ||
1202 | int cpu; | ||
1203 | int pc; | ||
1204 | |||
1205 | if (unlikely(!ftrace_function_enabled)) | ||
1206 | return; | ||
1207 | |||
1208 | /* | ||
1209 | * Need to use raw, since this must be called before the | ||
1210 | * recursive protection is performed. | ||
1211 | */ | ||
1212 | local_irq_save(flags); | ||
1213 | cpu = raw_smp_processor_id(); | ||
1214 | data = tr->data[cpu]; | ||
1215 | disabled = atomic_inc_return(&data->disabled); | ||
1216 | |||
1217 | if (likely(disabled == 1)) { | ||
1218 | pc = preempt_count(); | ||
1219 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1220 | } | ||
1221 | |||
1222 | atomic_dec(&data->disabled); | ||
1223 | local_irq_restore(flags); | ||
1224 | } | ||
1225 | |||
1226 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
1227 | int trace_graph_entry(struct ftrace_graph_ent *trace) | ||
1228 | { | ||
1229 | struct trace_array *tr = &global_trace; | ||
1230 | struct trace_array_cpu *data; | ||
1231 | unsigned long flags; | ||
1232 | long disabled; | ||
1233 | int cpu; | ||
1234 | int pc; | ||
1235 | |||
1236 | if (!ftrace_trace_task(current)) | ||
1237 | return 0; | ||
1238 | |||
1239 | if (!ftrace_graph_addr(trace->func)) | ||
1240 | return 0; | ||
1241 | |||
1242 | local_irq_save(flags); | ||
1243 | cpu = raw_smp_processor_id(); | ||
1244 | data = tr->data[cpu]; | ||
1245 | disabled = atomic_inc_return(&data->disabled); | ||
1246 | if (likely(disabled == 1)) { | ||
1247 | pc = preempt_count(); | ||
1248 | __trace_graph_entry(tr, data, trace, flags, pc); | ||
1249 | } | ||
1250 | /* Only do the atomic if it is not already set */ | ||
1251 | if (!test_tsk_trace_graph(current)) | ||
1252 | set_tsk_trace_graph(current); | ||
1253 | atomic_dec(&data->disabled); | ||
1254 | local_irq_restore(flags); | ||
1255 | |||
1256 | return 1; | ||
1257 | } | ||
1258 | |||
1259 | void trace_graph_return(struct ftrace_graph_ret *trace) | ||
1260 | { | ||
1261 | struct trace_array *tr = &global_trace; | ||
1262 | struct trace_array_cpu *data; | ||
1263 | unsigned long flags; | ||
1264 | long disabled; | ||
1265 | int cpu; | ||
1266 | int pc; | ||
1267 | |||
1268 | local_irq_save(flags); | ||
1269 | cpu = raw_smp_processor_id(); | ||
1270 | data = tr->data[cpu]; | ||
1271 | disabled = atomic_inc_return(&data->disabled); | ||
1272 | if (likely(disabled == 1)) { | ||
1273 | pc = preempt_count(); | ||
1274 | __trace_graph_return(tr, data, trace, flags, pc); | ||
1275 | } | ||
1276 | if (!trace->depth) | ||
1277 | clear_tsk_trace_graph(current); | ||
1278 | atomic_dec(&data->disabled); | ||
1279 | local_irq_restore(flags); | ||
1280 | } | ||
1281 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
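
trace_graph_entry() returning nonzero tells the architecture code to hook the function's return address so that trace_graph_return() fires on exit, and the per-task flag set here lets other code detect or pause an in-flight graph trace (see pause_graph_tracing() in trace_vprintk() further down). A usage sketch of how a tracer wires the pair up, assuming the register_ftrace_graph() signature used by this series:

    static int graph_trace_init(struct trace_array *tr)
    {
    	/* entry hooking is useless without return hooking,
    	 * so both callbacks are registered together */
    	return register_ftrace_graph(trace_graph_return,
    				     trace_graph_entry);
    }
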
1282 | |||
893 | static struct ftrace_ops trace_ops __read_mostly = | 1283 | static struct ftrace_ops trace_ops __read_mostly = |
894 | { | 1284 | { |
895 | .func = function_trace_call, | 1285 | .func = function_trace_call, |
@@ -898,9 +1288,14 @@ static struct ftrace_ops trace_ops __read_mostly = | |||
898 | void tracing_start_function_trace(void) | 1288 | void tracing_start_function_trace(void) |
899 | { | 1289 | { |
900 | ftrace_function_enabled = 0; | 1290 | ftrace_function_enabled = 0; |
1291 | |||
1292 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
1293 | trace_ops.func = function_trace_call_preempt_only; | ||
1294 | else | ||
1295 | trace_ops.func = function_trace_call; | ||
1296 | |||
901 | register_ftrace_function(&trace_ops); | 1297 | register_ftrace_function(&trace_ops); |
902 | if (tracer_enabled) | 1298 | ftrace_function_enabled = 1; |
903 | ftrace_function_enabled = 1; | ||
904 | } | 1299 | } |
905 | 1300 | ||
906 | void tracing_stop_function_trace(void) | 1301 | void tracing_stop_function_trace(void) |
@@ -912,6 +1307,7 @@ void tracing_stop_function_trace(void) | |||
912 | 1307 | ||
913 | enum trace_file_type { | 1308 | enum trace_file_type { |
914 | TRACE_FILE_LAT_FMT = 1, | 1309 | TRACE_FILE_LAT_FMT = 1, |
1310 | TRACE_FILE_ANNOTATE = 2, | ||
915 | }; | 1311 | }; |
916 | 1312 | ||
917 | static void trace_iterator_increment(struct trace_iterator *iter, int cpu) | 1313 | static void trace_iterator_increment(struct trace_iterator *iter, int cpu) |
@@ -1047,10 +1443,6 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1047 | 1443 | ||
1048 | atomic_inc(&trace_record_cmdline_disabled); | 1444 | atomic_inc(&trace_record_cmdline_disabled); |
1049 | 1445 | ||
1050 | /* let the tracer grab locks here if needed */ | ||
1051 | if (current_trace->start) | ||
1052 | current_trace->start(iter); | ||
1053 | |||
1054 | if (*pos != iter->pos) { | 1446 | if (*pos != iter->pos) { |
1055 | iter->ent = NULL; | 1447 | iter->ent = NULL; |
1056 | iter->cpu = 0; | 1448 | iter->cpu = 0; |
@@ -1077,14 +1469,7 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1077 | 1469 | ||
1078 | static void s_stop(struct seq_file *m, void *p) | 1470 | static void s_stop(struct seq_file *m, void *p) |
1079 | { | 1471 | { |
1080 | struct trace_iterator *iter = m->private; | ||
1081 | |||
1082 | atomic_dec(&trace_record_cmdline_disabled); | 1472 | atomic_dec(&trace_record_cmdline_disabled); |
1083 | |||
1084 | /* let the tracer release locks here if needed */ | ||
1085 | if (current_trace && current_trace == iter->trace && iter->trace->stop) | ||
1086 | iter->trace->stop(iter); | ||
1087 | |||
1088 | mutex_unlock(&trace_types_lock); | 1473 | mutex_unlock(&trace_types_lock); |
1089 | } | 1474 | } |
1090 | 1475 | ||
@@ -1143,7 +1528,7 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
1143 | # define IP_FMT "%016lx" | 1528 | # define IP_FMT "%016lx" |
1144 | #endif | 1529 | #endif |
1145 | 1530 | ||
1146 | static int | 1531 | int |
1147 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | 1532 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) |
1148 | { | 1533 | { |
1149 | int ret; | 1534 | int ret; |
@@ -1164,6 +1549,78 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | |||
1164 | return ret; | 1549 | return ret; |
1165 | } | 1550 | } |
1166 | 1551 | ||
1552 | static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
1553 | unsigned long ip, unsigned long sym_flags) | ||
1554 | { | ||
1555 | struct file *file = NULL; | ||
1556 | unsigned long vmstart = 0; | ||
1557 | int ret = 1; | ||
1558 | |||
1559 | if (mm) { | ||
1560 | const struct vm_area_struct *vma; | ||
1561 | |||
1562 | down_read(&mm->mmap_sem); | ||
1563 | vma = find_vma(mm, ip); | ||
1564 | if (vma) { | ||
1565 | file = vma->vm_file; | ||
1566 | vmstart = vma->vm_start; | ||
1567 | } | ||
1568 | if (file) { | ||
1569 | ret = trace_seq_path(s, &file->f_path); | ||
1570 | if (ret) | ||
1571 | ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart); | ||
1572 | } | ||
1573 | up_read(&mm->mmap_sem); | ||
1574 | } | ||
1575 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
1576 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
1577 | return ret; | ||
1578 | } | ||
1579 | |||
1580 | static int | ||
1581 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
1582 | unsigned long sym_flags) | ||
1583 | { | ||
1584 | struct mm_struct *mm = NULL; | ||
1585 | int ret = 1; | ||
1586 | unsigned int i; | ||
1587 | |||
1588 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
1589 | struct task_struct *task; | ||
1590 | /* | ||
1591 | * we do the lookup on the thread group leader, | ||
1592 | * since individual threads might have already quit! | ||
1593 | */ | ||
1594 | rcu_read_lock(); | ||
1595 | task = find_task_by_vpid(entry->ent.tgid); | ||
1596 | if (task) | ||
1597 | mm = get_task_mm(task); | ||
1598 | rcu_read_unlock(); | ||
1599 | } | ||
1600 | |||
1601 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
1602 | unsigned long ip = entry->caller[i]; | ||
1603 | |||
1604 | if (ip == ULONG_MAX || !ret) | ||
1605 | break; | ||
1606 | if (i && ret) | ||
1607 | ret = trace_seq_puts(s, " <- "); | ||
1608 | if (!ip) { | ||
1609 | if (ret) | ||
1610 | ret = trace_seq_puts(s, "??"); | ||
1611 | continue; | ||
1612 | } | ||
1613 | if (!ret) | ||
1614 | break; | ||
1615 | if (ret) | ||
1616 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
1617 | } | ||
1618 | |||
1619 | if (mm) | ||
1620 | mmput(mm); | ||
1621 | return ret; | ||
1622 | } | ||
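
seq_print_user_ip() resolves each user-space address against the task's memory map and prints it as the backing object plus an offset from the mapping start, which stays meaningful under address-space randomization; seq_print_userip_objs() does the lookup through the thread group leader because the recording thread may already have exited. A hypothetical rendering (paths and offsets invented for illustration):

    /lib/libc-2.7.so[+0x1e564] <- /lib/libc-2.7.so[+0x91f2] <- /bin/bash[+0x12345]
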
1623 | |||
1167 | static void print_lat_help_header(struct seq_file *m) | 1624 | static void print_lat_help_header(struct seq_file *m) |
1168 | { | 1625 | { |
1169 | seq_puts(m, "# _------=> CPU# \n"); | 1626 | seq_puts(m, "# _------=> CPU# \n"); |
@@ -1301,6 +1758,13 @@ lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, | |||
1301 | 1758 | ||
1302 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | 1759 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; |
1303 | 1760 | ||
1761 | static int task_state_char(unsigned long state) | ||
1762 | { | ||
1763 | int bit = state ? __ffs(state) + 1 : 0; | ||
1764 | |||
1765 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
1766 | } | ||
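
task_state_char() centralizes the prev_state/next_state decoding that the printers below used to open-code, each slightly differently: the lowest set state bit selects a letter, no bit means running, and an out-of-range bit now yields '?' instead of indexing past the string. A worked example, assuming TASK_STATE_TO_CHAR_STR is "RSDTtZX" as in this kernel series:

    /* task_state_char(0)         == 'R'  (no state bit set: running)
     * task_state_char(0x01)      == 'S'  (TASK_INTERRUPTIBLE:   __ffs(1) + 1 == 1)
     * task_state_char(0x02)      == 'D'  (TASK_UNINTERRUPTIBLE: __ffs(2) + 1 == 2)
     * task_state_char(1UL << 20) == '?'  (bit index beyond the string)
     */
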
1767 | |||
1304 | /* | 1768 | /* |
1305 | * The message is supposed to contain an ending newline. | 1769 | * The message is supposed to contain an ending newline. |
1306 | * If the printing stops prematurely, try to add a newline of our own. | 1770 | * If the printing stops prematurely, try to add a newline of our own. |
@@ -1338,6 +1802,23 @@ void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | |||
1338 | trace_seq_putc(s, '\n'); | 1802 | trace_seq_putc(s, '\n'); |
1339 | } | 1803 | } |
1340 | 1804 | ||
1805 | static void test_cpu_buff_start(struct trace_iterator *iter) | ||
1806 | { | ||
1807 | struct trace_seq *s = &iter->seq; | ||
1808 | |||
1809 | if (!(trace_flags & TRACE_ITER_ANNOTATE)) | ||
1810 | return; | ||
1811 | |||
1812 | if (!(iter->iter_flags & TRACE_FILE_ANNOTATE)) | ||
1813 | return; | ||
1814 | |||
1815 | if (cpu_isset(iter->cpu, iter->started)) | ||
1816 | return; | ||
1817 | |||
1818 | cpu_set(iter->cpu, iter->started); | ||
1819 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | ||
1820 | } | ||
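
test_cpu_buff_start() emits a one-time marker when output first switches to a given CPU, so that readers of an overrun buffer know where each CPU's surviving data begins; __tracing_open() below sets TRACE_FILE_ANNOTATE only when ring_buffer_overruns() reports losses. In the trace output the marker appears as:

    ##### CPU 1 buffer started ####
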
1821 | |||
1341 | static enum print_line_t | 1822 | static enum print_line_t |
1342 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | 1823 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) |
1343 | { | 1824 | { |
@@ -1352,11 +1833,12 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1352 | char *comm; | 1833 | char *comm; |
1353 | int S, T; | 1834 | int S, T; |
1354 | int i; | 1835 | int i; |
1355 | unsigned state; | ||
1356 | 1836 | ||
1357 | if (entry->type == TRACE_CONT) | 1837 | if (entry->type == TRACE_CONT) |
1358 | return TRACE_TYPE_HANDLED; | 1838 | return TRACE_TYPE_HANDLED; |
1359 | 1839 | ||
1840 | test_cpu_buff_start(iter); | ||
1841 | |||
1360 | next_entry = find_next_entry(iter, NULL, &next_ts); | 1842 | next_entry = find_next_entry(iter, NULL, &next_ts); |
1361 | if (!next_entry) | 1843 | if (!next_entry) |
1362 | next_ts = iter->ts; | 1844 | next_ts = iter->ts; |
@@ -1396,12 +1878,8 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1396 | 1878 | ||
1397 | trace_assign_type(field, entry); | 1879 | trace_assign_type(field, entry); |
1398 | 1880 | ||
1399 | T = field->next_state < sizeof(state_to_char) ? | 1881 | T = task_state_char(field->next_state); |
1400 | state_to_char[field->next_state] : 'X'; | 1882 | S = task_state_char(field->prev_state); |
1401 | |||
1402 | state = field->prev_state ? | ||
1403 | __ffs(field->prev_state) + 1 : 0; | ||
1404 | S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; | ||
1405 | comm = trace_find_cmdline(field->next_pid); | 1883 | comm = trace_find_cmdline(field->next_pid); |
1406 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | 1884 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", |
1407 | field->prev_pid, | 1885 | field->prev_pid, |
@@ -1448,6 +1926,27 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | |||
1448 | trace_seq_print_cont(s, iter); | 1926 | trace_seq_print_cont(s, iter); |
1449 | break; | 1927 | break; |
1450 | } | 1928 | } |
1929 | case TRACE_BRANCH: { | ||
1930 | struct trace_branch *field; | ||
1931 | |||
1932 | trace_assign_type(field, entry); | ||
1933 | |||
1934 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
1935 | field->correct ? " ok " : " MISS ", | ||
1936 | field->func, | ||
1937 | field->file, | ||
1938 | field->line); | ||
1939 | break; | ||
1940 | } | ||
1941 | case TRACE_USER_STACK: { | ||
1942 | struct userstack_entry *field; | ||
1943 | |||
1944 | trace_assign_type(field, entry); | ||
1945 | |||
1946 | seq_print_userip_objs(field, s, sym_flags); | ||
1947 | trace_seq_putc(s, '\n'); | ||
1948 | break; | ||
1949 | } | ||
1451 | default: | 1950 | default: |
1452 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | 1951 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
1453 | } | 1952 | } |
@@ -1472,6 +1971,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1472 | if (entry->type == TRACE_CONT) | 1971 | if (entry->type == TRACE_CONT) |
1473 | return TRACE_TYPE_HANDLED; | 1972 | return TRACE_TYPE_HANDLED; |
1474 | 1973 | ||
1974 | test_cpu_buff_start(iter); | ||
1975 | |||
1475 | comm = trace_find_cmdline(iter->ent->pid); | 1976 | comm = trace_find_cmdline(iter->ent->pid); |
1476 | 1977 | ||
1477 | t = ns2usecs(iter->ts); | 1978 | t = ns2usecs(iter->ts); |
@@ -1519,10 +2020,8 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1519 | 2020 | ||
1520 | trace_assign_type(field, entry); | 2021 | trace_assign_type(field, entry); |
1521 | 2022 | ||
1522 | S = field->prev_state < sizeof(state_to_char) ? | 2023 | T = task_state_char(field->next_state); |
1523 | state_to_char[field->prev_state] : 'X'; | 2024 | S = task_state_char(field->prev_state); |
1524 | T = field->next_state < sizeof(state_to_char) ? | ||
1525 | state_to_char[field->next_state] : 'X'; | ||
1526 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", | 2025 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", |
1527 | field->prev_pid, | 2026 | field->prev_pid, |
1528 | field->prev_prio, | 2027 | field->prev_prio, |
@@ -1581,6 +2080,37 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
1581 | trace_seq_print_cont(s, iter); | 2080 | trace_seq_print_cont(s, iter); |
1582 | break; | 2081 | break; |
1583 | } | 2082 | } |
2083 | case TRACE_GRAPH_RET: { | ||
2084 | return print_graph_function(iter); | ||
2085 | } | ||
2086 | case TRACE_GRAPH_ENT: { | ||
2087 | return print_graph_function(iter); | ||
2088 | } | ||
2089 | case TRACE_BRANCH: { | ||
2090 | struct trace_branch *field; | ||
2091 | |||
2092 | trace_assign_type(field, entry); | ||
2093 | |||
2094 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
2095 | field->correct ? " ok " : " MISS ", | ||
2096 | field->func, | ||
2097 | field->file, | ||
2098 | field->line); | ||
2099 | break; | ||
2100 | } | ||
2101 | case TRACE_USER_STACK: { | ||
2102 | struct userstack_entry *field; | ||
2103 | |||
2104 | trace_assign_type(field, entry); | ||
2105 | |||
2106 | ret = seq_print_userip_objs(field, s, sym_flags); | ||
2107 | if (!ret) | ||
2108 | return TRACE_TYPE_PARTIAL_LINE; | ||
2109 | ret = trace_seq_putc(s, '\n'); | ||
2110 | if (!ret) | ||
2111 | return TRACE_TYPE_PARTIAL_LINE; | ||
2112 | break; | ||
2113 | } | ||
1584 | } | 2114 | } |
1585 | return TRACE_TYPE_HANDLED; | 2115 | return TRACE_TYPE_HANDLED; |
1586 | } | 2116 | } |
@@ -1621,12 +2151,9 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | |||
1621 | 2151 | ||
1622 | trace_assign_type(field, entry); | 2152 | trace_assign_type(field, entry); |
1623 | 2153 | ||
1624 | S = field->prev_state < sizeof(state_to_char) ? | 2154 | T = task_state_char(field->next_state); |
1625 | state_to_char[field->prev_state] : 'X'; | 2155 | S = entry->type == TRACE_WAKE ? '+' : |
1626 | T = field->next_state < sizeof(state_to_char) ? | 2156 | task_state_char(field->prev_state); |
1627 | state_to_char[field->next_state] : 'X'; | ||
1628 | if (entry->type == TRACE_WAKE) | ||
1629 | S = '+'; | ||
1630 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", | 2157 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", |
1631 | field->prev_pid, | 2158 | field->prev_pid, |
1632 | field->prev_prio, | 2159 | field->prev_prio, |
@@ -1640,6 +2167,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | |||
1640 | break; | 2167 | break; |
1641 | } | 2168 | } |
1642 | case TRACE_SPECIAL: | 2169 | case TRACE_SPECIAL: |
2170 | case TRACE_USER_STACK: | ||
1643 | case TRACE_STACK: { | 2171 | case TRACE_STACK: { |
1644 | struct special_entry *field; | 2172 | struct special_entry *field; |
1645 | 2173 | ||
@@ -1712,12 +2240,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
1712 | 2240 | ||
1713 | trace_assign_type(field, entry); | 2241 | trace_assign_type(field, entry); |
1714 | 2242 | ||
1715 | S = field->prev_state < sizeof(state_to_char) ? | 2243 | T = task_state_char(field->next_state); |
1716 | state_to_char[field->prev_state] : 'X'; | 2244 | S = entry->type == TRACE_WAKE ? '+' : |
1717 | T = field->next_state < sizeof(state_to_char) ? | 2245 | task_state_char(field->prev_state); |
1718 | state_to_char[field->next_state] : 'X'; | ||
1719 | if (entry->type == TRACE_WAKE) | ||
1720 | S = '+'; | ||
1721 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | 2246 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); |
1722 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | 2247 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); |
1723 | SEQ_PUT_HEX_FIELD_RET(s, S); | 2248 | SEQ_PUT_HEX_FIELD_RET(s, S); |
@@ -1728,6 +2253,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
1728 | break; | 2253 | break; |
1729 | } | 2254 | } |
1730 | case TRACE_SPECIAL: | 2255 | case TRACE_SPECIAL: |
2256 | case TRACE_USER_STACK: | ||
1731 | case TRACE_STACK: { | 2257 | case TRACE_STACK: { |
1732 | struct special_entry *field; | 2258 | struct special_entry *field; |
1733 | 2259 | ||
@@ -1744,6 +2270,25 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
1744 | return TRACE_TYPE_HANDLED; | 2270 | return TRACE_TYPE_HANDLED; |
1745 | } | 2271 | } |
1746 | 2272 | ||
2273 | static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) | ||
2274 | { | ||
2275 | struct trace_seq *s = &iter->seq; | ||
2276 | struct trace_entry *entry = iter->ent; | ||
2277 | struct print_entry *field; | ||
2278 | int ret; | ||
2279 | |||
2280 | trace_assign_type(field, entry); | ||
2281 | |||
2282 | ret = trace_seq_printf(s, field->buf); | ||
2283 | if (!ret) | ||
2284 | return TRACE_TYPE_PARTIAL_LINE; | ||
2285 | |||
2286 | if (entry->flags & TRACE_FLAG_CONT) | ||
2287 | trace_seq_print_cont(s, iter); | ||
2288 | |||
2289 | return TRACE_TYPE_HANDLED; | ||
2290 | } | ||
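
print_printk_msg_only() strips a TRACE_PRINT event down to its raw buffer, with no timestamp, comm, pid, or ip. print_trace_line() below routes to it only when both the printk and printk-msg-only options are enabled. A sketch of the effect (surrounding-format details vary by output mode):

    /* with printk and printk-msg-only both set in trace_options:
     *   ftrace_printk("hello %d\n", 42)   prints as   "hello 42"
     * rather than a full "<comm>-<pid> [<cpu>] <timestamp>: hello 42" line
     */
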
2291 | |||
1747 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | 2292 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) |
1748 | { | 2293 | { |
1749 | struct trace_seq *s = &iter->seq; | 2294 | struct trace_seq *s = &iter->seq; |
@@ -1782,6 +2327,7 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
1782 | break; | 2327 | break; |
1783 | } | 2328 | } |
1784 | case TRACE_SPECIAL: | 2329 | case TRACE_SPECIAL: |
2330 | case TRACE_USER_STACK: | ||
1785 | case TRACE_STACK: { | 2331 | case TRACE_STACK: { |
1786 | struct special_entry *field; | 2332 | struct special_entry *field; |
1787 | 2333 | ||
@@ -1823,6 +2369,11 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
1823 | return ret; | 2369 | return ret; |
1824 | } | 2370 | } |
1825 | 2371 | ||
2372 | if (iter->ent->type == TRACE_PRINT && | ||
2373 | trace_flags & TRACE_ITER_PRINTK && | ||
2374 | trace_flags & TRACE_ITER_PRINTK_MSGONLY) | ||
2375 | return print_printk_msg_only(iter); | ||
2376 | |||
1826 | if (trace_flags & TRACE_ITER_BIN) | 2377 | if (trace_flags & TRACE_ITER_BIN) |
1827 | return print_bin_fmt(iter); | 2378 | return print_bin_fmt(iter); |
1828 | 2379 | ||
@@ -1847,7 +2398,9 @@ static int s_show(struct seq_file *m, void *v) | |||
1847 | seq_printf(m, "# tracer: %s\n", iter->trace->name); | 2398 | seq_printf(m, "# tracer: %s\n", iter->trace->name); |
1848 | seq_puts(m, "#\n"); | 2399 | seq_puts(m, "#\n"); |
1849 | } | 2400 | } |
1850 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2401 | if (iter->trace && iter->trace->print_header) |
2402 | iter->trace->print_header(m); | ||
2403 | else if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | ||
1851 | /* print nothing if the buffers are empty */ | 2404 | /* print nothing if the buffers are empty */ |
1852 | if (trace_empty(iter)) | 2405 | if (trace_empty(iter)) |
1853 | return 0; | 2406 | return 0; |
@@ -1899,6 +2452,15 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1899 | iter->trace = current_trace; | 2452 | iter->trace = current_trace; |
1900 | iter->pos = -1; | 2453 | iter->pos = -1; |
1901 | 2454 | ||
2455 | /* Notify the tracer early, before we stop tracing. */ | ||
2456 | if (iter->trace && iter->trace->open) | ||
2457 | iter->trace->open(iter); | ||
2458 | |||
2459 | /* Annotate start of buffers if we had overruns */ | ||
2460 | if (ring_buffer_overruns(iter->tr->buffer)) | ||
2461 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | ||
2462 | |||
2463 | |||
1902 | for_each_tracing_cpu(cpu) { | 2464 | for_each_tracing_cpu(cpu) { |
1903 | 2465 | ||
1904 | iter->buffer_iter[cpu] = | 2466 | iter->buffer_iter[cpu] = |
@@ -1917,13 +2479,7 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
1917 | m->private = iter; | 2479 | m->private = iter; |
1918 | 2480 | ||
1919 | /* stop the trace while dumping */ | 2481 | /* stop the trace while dumping */ |
1920 | if (iter->tr->ctrl) { | 2482 | tracing_stop(); |
1921 | tracer_enabled = 0; | ||
1922 | ftrace_function_enabled = 0; | ||
1923 | } | ||
1924 | |||
1925 | if (iter->trace && iter->trace->open) | ||
1926 | iter->trace->open(iter); | ||
1927 | 2483 | ||
1928 | mutex_unlock(&trace_types_lock); | 2484 | mutex_unlock(&trace_types_lock); |
1929 | 2485 | ||
@@ -1966,14 +2522,7 @@ int tracing_release(struct inode *inode, struct file *file) | |||
1966 | iter->trace->close(iter); | 2522 | iter->trace->close(iter); |
1967 | 2523 | ||
1968 | /* reenable tracing if it was previously enabled */ | 2524 | /* reenable tracing if it was previously enabled */ |
1969 | if (iter->tr->ctrl) { | 2525 | tracing_start(); |
1970 | tracer_enabled = 1; | ||
1971 | /* | ||
1972 | * It is safe to enable function tracing even if it | ||
1973 | * isn't used | ||
1974 | */ | ||
1975 | ftrace_function_enabled = 1; | ||
1976 | } | ||
1977 | mutex_unlock(&trace_types_lock); | 2526 | mutex_unlock(&trace_types_lock); |
1978 | 2527 | ||
1979 | seq_release(inode, file); | 2528 | seq_release(inode, file); |
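
__tracing_open() and tracing_release() now delegate to tracing_stop()/tracing_start() instead of poking tracer_enabled and ftrace_function_enabled by hand. Those helpers are introduced elsewhere in this series; a simplified sketch of the counted pair (the real versions also handle max_tr), showing why paired stop/start callers nest safely:

    static DEFINE_SPINLOCK(tracing_start_lock);
    static int trace_stop_count;

    void tracing_stop(void)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&tracing_start_lock, flags);
    	if (trace_stop_count++ == 0)	/* only the first stopper acts */
    		ring_buffer_record_disable(global_trace.buffer);
    	spin_unlock_irqrestore(&tracing_start_lock, flags);
    }

    void tracing_start(void)
    {
    	unsigned long flags;

    	if (tracing_disabled)
    		return;

    	spin_lock_irqsave(&tracing_start_lock, flags);
    	if (trace_stop_count && --trace_stop_count == 0)
    		ring_buffer_record_enable(global_trace.buffer);
    	spin_unlock_irqrestore(&tracing_start_lock, flags);
    }
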
@@ -2151,7 +2700,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2151 | if (err) | 2700 | if (err) |
2152 | goto err_unlock; | 2701 | goto err_unlock; |
2153 | 2702 | ||
2154 | raw_local_irq_disable(); | 2703 | local_irq_disable(); |
2155 | __raw_spin_lock(&ftrace_max_lock); | 2704 | __raw_spin_lock(&ftrace_max_lock); |
2156 | for_each_tracing_cpu(cpu) { | 2705 | for_each_tracing_cpu(cpu) { |
2157 | /* | 2706 | /* |
@@ -2168,7 +2717,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
2168 | } | 2717 | } |
2169 | } | 2718 | } |
2170 | __raw_spin_unlock(&ftrace_max_lock); | 2719 | __raw_spin_unlock(&ftrace_max_lock); |
2171 | raw_local_irq_enable(); | 2720 | local_irq_enable(); |
2172 | 2721 | ||
2173 | tracing_cpumask = tracing_cpumask_new; | 2722 | tracing_cpumask = tracing_cpumask_new; |
2174 | 2723 | ||
@@ -2189,13 +2738,16 @@ static struct file_operations tracing_cpumask_fops = { | |||
2189 | }; | 2738 | }; |
2190 | 2739 | ||
2191 | static ssize_t | 2740 | static ssize_t |
2192 | tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | 2741 | tracing_trace_options_read(struct file *filp, char __user *ubuf, |
2193 | size_t cnt, loff_t *ppos) | 2742 | size_t cnt, loff_t *ppos) |
2194 | { | 2743 | { |
2744 | int i; | ||
2195 | char *buf; | 2745 | char *buf; |
2196 | int r = 0; | 2746 | int r = 0; |
2197 | int len = 0; | 2747 | int len = 0; |
2198 | int i; | 2748 | u32 tracer_flags = current_trace->flags->val; |
2749 | struct tracer_opt *trace_opts = current_trace->flags->opts; | ||
2750 | |||
2199 | 2751 | ||
2200 | /* calculate max size */ | 2752 | /* calculate max size */ |
2201 | for (i = 0; trace_options[i]; i++) { | 2753 | for (i = 0; trace_options[i]; i++) { |
@@ -2203,6 +2755,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2203 | len += 3; /* "no" and space */ | 2755 | len += 3; /* "no" and space */ |
2204 | } | 2756 | } |
2205 | 2757 | ||
2758 | /* | ||
2759 | * Increase the size with names of options specific | ||
2760 | * to the current tracer. | ||
2761 | */ | ||
2762 | for (i = 0; trace_opts[i].name; i++) { | ||
2763 | len += strlen(trace_opts[i].name); | ||
2764 | len += 3; /* "no" and space */ | ||
2765 | } | ||
2766 | |||
2206 | /* +2 for \n and \0 */ | 2767 | /* +2 for \n and \0 */ |
2207 | buf = kmalloc(len + 2, GFP_KERNEL); | 2768 | buf = kmalloc(len + 2, GFP_KERNEL); |
2208 | if (!buf) | 2769 | if (!buf) |
@@ -2215,6 +2776,15 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2215 | r += sprintf(buf + r, "no%s ", trace_options[i]); | 2776 | r += sprintf(buf + r, "no%s ", trace_options[i]); |
2216 | } | 2777 | } |
2217 | 2778 | ||
2779 | for (i = 0; trace_opts[i].name; i++) { | ||
2780 | if (tracer_flags & trace_opts[i].bit) | ||
2781 | r += sprintf(buf + r, "%s ", | ||
2782 | trace_opts[i].name); | ||
2783 | else | ||
2784 | r += sprintf(buf + r, "no%s ", | ||
2785 | trace_opts[i].name); | ||
2786 | } | ||
2787 | |||
2218 | r += sprintf(buf + r, "\n"); | 2788 | r += sprintf(buf + r, "\n"); |
2219 | WARN_ON(r >= len + 2); | 2789 | WARN_ON(r >= len + 2); |
2220 | 2790 | ||
@@ -2225,13 +2795,48 @@ tracing_iter_ctrl_read(struct file *filp, char __user *ubuf, | |||
2225 | return r; | 2795 | return r; |
2226 | } | 2796 | } |
2227 | 2797 | ||
2798 | /* Try to assign a tracer specific option */ | ||
2799 | static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | ||
2800 | { | ||
2801 | struct tracer_flags *trace_flags = trace->flags; | ||
2802 | struct tracer_opt *opts = NULL; | ||
2803 | int ret = 0, i = 0; | ||
2804 | int len; | ||
2805 | |||
2806 | for (i = 0; trace_flags->opts[i].name; i++) { | ||
2807 | opts = &trace_flags->opts[i]; | ||
2808 | len = strlen(opts->name); | ||
2809 | |||
2810 | if (strncmp(cmp, opts->name, len) == 0) { | ||
2811 | ret = trace->set_flag(trace_flags->val, | ||
2812 | opts->bit, !neg); | ||
2813 | break; | ||
2814 | } | ||
2815 | } | ||
2816 | /* Not found */ | ||
2817 | if (!trace_flags->opts[i].name) | ||
2818 | return -EINVAL; | ||
2819 | |||
2820 | /* Refused to handle */ | ||
2821 | if (ret) | ||
2822 | return ret; | ||
2823 | |||
2824 | if (neg) | ||
2825 | trace_flags->val &= ~opts->bit; | ||
2826 | else | ||
2827 | trace_flags->val |= opts->bit; | ||
2828 | |||
2829 | return 0; | ||
2830 | } | ||
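
set_tracer_option() walks the NULL-terminated opts array of the current tracer, which is how tracer-specific flags come to share the trace_options file with the global ones. What a tracer supplies, using the struct tracer_opt / struct tracer_flags / TRACER_OPT() additions from trace.h further down in this diff (all names here are hypothetical):

    #define TRACE_MYTRACER_VERBOSE	0x1	/* bit mask within ->val */

    static struct tracer_opt mytracer_opts[] = {
    	{ TRACER_OPT(verbose, TRACE_MYTRACER_VERBOSE) },
    	{ }	/* .name == NULL terminates the scan above */
    };

    static struct tracer_flags mytracer_flags = {
    	.val	= 0,			/* every option off by default */
    	.opts	= mytracer_opts,
    };

    /* return 0 to accept; set_tracer_option() then updates ->val */
    static int mytracer_set_flag(u32 old_flags, u32 bit, int set)
    {
    	return 0;
    }

    static struct tracer mytracer __read_mostly = {
    	.name		= "mytracer",
    	.flags		= &mytracer_flags,
    	.set_flag	= mytracer_set_flag,
    };
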
2831 | |||
2228 | static ssize_t | 2832 | static ssize_t |
2229 | tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | 2833 | tracing_trace_options_write(struct file *filp, const char __user *ubuf, |
2230 | size_t cnt, loff_t *ppos) | 2834 | size_t cnt, loff_t *ppos) |
2231 | { | 2835 | { |
2232 | char buf[64]; | 2836 | char buf[64]; |
2233 | char *cmp = buf; | 2837 | char *cmp = buf; |
2234 | int neg = 0; | 2838 | int neg = 0; |
2839 | int ret; | ||
2235 | int i; | 2840 | int i; |
2236 | 2841 | ||
2237 | if (cnt >= sizeof(buf)) | 2842 | if (cnt >= sizeof(buf)) |
@@ -2258,11 +2863,13 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2258 | break; | 2863 | break; |
2259 | } | 2864 | } |
2260 | } | 2865 | } |
2261 | /* | 2866 | |
2262 | * If no option could be set, return an error: | 2867 | /* If no option could be set, test the specific tracer options */ |
2263 | */ | 2868 | if (!trace_options[i]) { |
2264 | if (!trace_options[i]) | 2869 | ret = set_tracer_option(current_trace, cmp, neg); |
2265 | return -EINVAL; | 2870 | if (ret) |
2871 | return ret; | ||
2872 | } | ||
2266 | 2873 | ||
2267 | filp->f_pos += cnt; | 2874 | filp->f_pos += cnt; |
2268 | 2875 | ||
@@ -2271,8 +2878,8 @@ tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2271 | 2878 | ||
2272 | static struct file_operations tracing_iter_fops = { | 2879 | static struct file_operations tracing_iter_fops = { |
2273 | .open = tracing_open_generic, | 2880 | .open = tracing_open_generic, |
2274 | .read = tracing_iter_ctrl_read, | 2881 | .read = tracing_trace_options_read, |
2275 | .write = tracing_iter_ctrl_write, | 2882 | .write = tracing_trace_options_write, |
2276 | }; | 2883 | }; |
2277 | 2884 | ||
2278 | static const char readme_msg[] = | 2885 | static const char readme_msg[] = |
@@ -2286,9 +2893,9 @@ static const char readme_msg[] = | |||
2286 | "# echo sched_switch > /debug/tracing/current_tracer\n" | 2893 | "# echo sched_switch > /debug/tracing/current_tracer\n" |
2287 | "# cat /debug/tracing/current_tracer\n" | 2894 | "# cat /debug/tracing/current_tracer\n" |
2288 | "sched_switch\n" | 2895 | "sched_switch\n" |
2289 | "# cat /debug/tracing/iter_ctrl\n" | 2896 | "# cat /debug/tracing/trace_options\n" |
2290 | "noprint-parent nosym-offset nosym-addr noverbose\n" | 2897 | "noprint-parent nosym-offset nosym-addr noverbose\n" |
2291 | "# echo print-parent > /debug/tracing/iter_ctrl\n" | 2898 | "# echo print-parent > /debug/tracing/trace_options\n" |
2292 | "# echo 1 > /debug/tracing/tracing_enabled\n" | 2899 | "# echo 1 > /debug/tracing/tracing_enabled\n" |
2293 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" | 2900 | "# cat /debug/tracing/trace > /tmp/trace.txt\n" |
2294 | "echo 0 > /debug/tracing/tracing_enabled\n" | 2901 | "echo 0 > /debug/tracing/tracing_enabled\n" |
@@ -2311,11 +2918,10 @@ static ssize_t | |||
2311 | tracing_ctrl_read(struct file *filp, char __user *ubuf, | 2918 | tracing_ctrl_read(struct file *filp, char __user *ubuf, |
2312 | size_t cnt, loff_t *ppos) | 2919 | size_t cnt, loff_t *ppos) |
2313 | { | 2920 | { |
2314 | struct trace_array *tr = filp->private_data; | ||
2315 | char buf[64]; | 2921 | char buf[64]; |
2316 | int r; | 2922 | int r; |
2317 | 2923 | ||
2318 | r = sprintf(buf, "%ld\n", tr->ctrl); | 2924 | r = sprintf(buf, "%u\n", tracer_enabled); |
2319 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2925 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2320 | } | 2926 | } |
2321 | 2927 | ||
@@ -2343,16 +2949,18 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2343 | val = !!val; | 2949 | val = !!val; |
2344 | 2950 | ||
2345 | mutex_lock(&trace_types_lock); | 2951 | mutex_lock(&trace_types_lock); |
2346 | if (tr->ctrl ^ val) { | 2952 | if (tracer_enabled ^ val) { |
2347 | if (val) | 2953 | if (val) { |
2348 | tracer_enabled = 1; | 2954 | tracer_enabled = 1; |
2349 | else | 2955 | if (current_trace->start) |
2956 | current_trace->start(tr); | ||
2957 | tracing_start(); | ||
2958 | } else { | ||
2350 | tracer_enabled = 0; | 2959 | tracer_enabled = 0; |
2351 | 2960 | tracing_stop(); | |
2352 | tr->ctrl = val; | 2961 | if (current_trace->stop) |
2353 | 2962 | current_trace->stop(tr); | |
2354 | if (current_trace && current_trace->ctrl_update) | 2963 | } |
2355 | current_trace->ctrl_update(tr); | ||
2356 | } | 2964 | } |
2357 | mutex_unlock(&trace_types_lock); | 2965 | mutex_unlock(&trace_types_lock); |
2358 | 2966 | ||
@@ -2378,29 +2986,11 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
2378 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2986 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2379 | } | 2987 | } |
2380 | 2988 | ||
2381 | static ssize_t | 2989 | static int tracing_set_tracer(char *buf) |
2382 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | ||
2383 | size_t cnt, loff_t *ppos) | ||
2384 | { | 2990 | { |
2385 | struct trace_array *tr = &global_trace; | 2991 | struct trace_array *tr = &global_trace; |
2386 | struct tracer *t; | 2992 | struct tracer *t; |
2387 | char buf[max_tracer_type_len+1]; | 2993 | int ret = 0; |
2388 | int i; | ||
2389 | size_t ret; | ||
2390 | |||
2391 | ret = cnt; | ||
2392 | |||
2393 | if (cnt > max_tracer_type_len) | ||
2394 | cnt = max_tracer_type_len; | ||
2395 | |||
2396 | if (copy_from_user(&buf, ubuf, cnt)) | ||
2397 | return -EFAULT; | ||
2398 | |||
2399 | buf[cnt] = 0; | ||
2400 | |||
2401 | /* strip ending whitespace. */ | ||
2402 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | ||
2403 | buf[i] = 0; | ||
2404 | 2994 | ||
2405 | mutex_lock(&trace_types_lock); | 2995 | mutex_lock(&trace_types_lock); |
2406 | for (t = trace_types; t; t = t->next) { | 2996 | for (t = trace_types; t; t = t->next) { |
@@ -2414,18 +3004,52 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf, | |||
2414 | if (t == current_trace) | 3004 | if (t == current_trace) |
2415 | goto out; | 3005 | goto out; |
2416 | 3006 | ||
3007 | trace_branch_disable(); | ||
2417 | if (current_trace && current_trace->reset) | 3008 | if (current_trace && current_trace->reset) |
2418 | current_trace->reset(tr); | 3009 | current_trace->reset(tr); |
2419 | 3010 | ||
2420 | current_trace = t; | 3011 | current_trace = t; |
2421 | if (t->init) | 3012 | if (t->init) { |
2422 | t->init(tr); | 3013 | ret = t->init(tr); |
3014 | if (ret) | ||
3015 | goto out; | ||
3016 | } | ||
2423 | 3017 | ||
3018 | trace_branch_enable(tr); | ||
2424 | out: | 3019 | out: |
2425 | mutex_unlock(&trace_types_lock); | 3020 | mutex_unlock(&trace_types_lock); |
2426 | 3021 | ||
2427 | if (ret > 0) | 3022 | return ret; |
2428 | filp->f_pos += ret; | 3023 | } |
3024 | |||
3025 | static ssize_t | ||
3026 | tracing_set_trace_write(struct file *filp, const char __user *ubuf, | ||
3027 | size_t cnt, loff_t *ppos) | ||
3028 | { | ||
3029 | char buf[max_tracer_type_len+1]; | ||
3030 | int i; | ||
3031 | size_t ret; | ||
3032 | int err; | ||
3033 | |||
3034 | ret = cnt; | ||
3035 | |||
3036 | if (cnt > max_tracer_type_len) | ||
3037 | cnt = max_tracer_type_len; | ||
3038 | |||
3039 | if (copy_from_user(&buf, ubuf, cnt)) | ||
3040 | return -EFAULT; | ||
3041 | |||
3042 | buf[cnt] = 0; | ||
3043 | |||
3044 | /* strip ending whitespace. */ | ||
3045 | for (i = cnt - 1; i > 0 && isspace(buf[i]); i--) | ||
3046 | buf[i] = 0; | ||
3047 | |||
3048 | err = tracing_set_tracer(buf); | ||
3049 | if (err) | ||
3050 | return err; | ||
3051 | |||
3052 | filp->f_pos += ret; | ||
2429 | 3053 | ||
2430 | return ret; | 3054 | return ret; |
2431 | } | 3055 | } |
@@ -2492,6 +3116,10 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
2492 | return -ENOMEM; | 3116 | return -ENOMEM; |
2493 | 3117 | ||
2494 | mutex_lock(&trace_types_lock); | 3118 | mutex_lock(&trace_types_lock); |
3119 | |||
3120 | /* trace pipe does not show start of buffer */ | ||
3121 | cpus_setall(iter->started); | ||
3122 | |||
2495 | iter->tr = &global_trace; | 3123 | iter->tr = &global_trace; |
2496 | iter->trace = current_trace; | 3124 | iter->trace = current_trace; |
2497 | filp->private_data = iter; | 3125 | filp->private_data = iter; |
@@ -2667,7 +3295,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, | |||
2667 | char buf[64]; | 3295 | char buf[64]; |
2668 | int r; | 3296 | int r; |
2669 | 3297 | ||
2670 | r = sprintf(buf, "%lu\n", tr->entries); | 3298 | r = sprintf(buf, "%lu\n", tr->entries >> 10); |
2671 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3299 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2672 | } | 3300 | } |
2673 | 3301 | ||
@@ -2678,7 +3306,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2678 | unsigned long val; | 3306 | unsigned long val; |
2679 | char buf[64]; | 3307 | char buf[64]; |
2680 | int ret, cpu; | 3308 | int ret, cpu; |
2681 | struct trace_array *tr = filp->private_data; | ||
2682 | 3309 | ||
2683 | if (cnt >= sizeof(buf)) | 3310 | if (cnt >= sizeof(buf)) |
2684 | return -EINVAL; | 3311 | return -EINVAL; |
@@ -2698,12 +3325,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2698 | 3325 | ||
2699 | mutex_lock(&trace_types_lock); | 3326 | mutex_lock(&trace_types_lock); |
2700 | 3327 | ||
2701 | if (tr->ctrl) { | 3328 | tracing_stop(); |
2702 | cnt = -EBUSY; | ||
2703 | pr_info("ftrace: please disable tracing" | ||
2704 | " before modifying buffer size\n"); | ||
2705 | goto out; | ||
2706 | } | ||
2707 | 3329 | ||
2708 | /* disable all cpu buffers */ | 3330 | /* disable all cpu buffers */ |
2709 | for_each_tracing_cpu(cpu) { | 3331 | for_each_tracing_cpu(cpu) { |
@@ -2713,6 +3335,9 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2713 | atomic_inc(&max_tr.data[cpu]->disabled); | 3335 | atomic_inc(&max_tr.data[cpu]->disabled); |
2714 | } | 3336 | } |
2715 | 3337 | ||
3338 | /* value is in KB */ | ||
3339 | val <<= 10; | ||
3340 | |||
2716 | if (val != global_trace.entries) { | 3341 | if (val != global_trace.entries) { |
2717 | ret = ring_buffer_resize(global_trace.buffer, val); | 3342 | ret = ring_buffer_resize(global_trace.buffer, val); |
2718 | if (ret < 0) { | 3343 | if (ret < 0) { |
@@ -2751,6 +3376,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
2751 | atomic_dec(&max_tr.data[cpu]->disabled); | 3376 | atomic_dec(&max_tr.data[cpu]->disabled); |
2752 | } | 3377 | } |
2753 | 3378 | ||
3379 | tracing_start(); | ||
2754 | max_tr.entries = global_trace.entries; | 3380 | max_tr.entries = global_trace.entries; |
2755 | mutex_unlock(&trace_types_lock); | 3381 | mutex_unlock(&trace_types_lock); |
2756 | 3382 | ||
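
The resize path no longer returns -EBUSY while tracing runs; tracing_stop()/tracing_start() and the per-cpu disabled counters bracket the operation instead. Together with the debugfs rename to buffer_size_kb below, the file also changes units from entries to kilobytes. A worked example of the conversion:

    /* write: echo 1411 > buffer_size_kb
     *   val = 1411;  val <<= 10;   => ring_buffer_resize(..., 1444864)
     * read:  cat buffer_size_kb
     *   sprintf(buf, "%lu\n", tr->entries >> 10)   => "1411"
     */
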
@@ -2762,7 +3388,7 @@ static int mark_printk(const char *fmt, ...) | |||
2762 | int ret; | 3388 | int ret; |
2763 | va_list args; | 3389 | va_list args; |
2764 | va_start(args, fmt); | 3390 | va_start(args, fmt); |
2765 | ret = trace_vprintk(0, fmt, args); | 3391 | ret = trace_vprintk(0, -1, fmt, args); |
2766 | va_end(args); | 3392 | va_end(args); |
2767 | return ret; | 3393 | return ret; |
2768 | } | 3394 | } |
@@ -2773,9 +3399,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
2773 | { | 3399 | { |
2774 | char *buf; | 3400 | char *buf; |
2775 | char *end; | 3401 | char *end; |
2776 | struct trace_array *tr = &global_trace; | ||
2777 | 3402 | ||
2778 | if (!tr->ctrl || tracing_disabled) | 3403 | if (tracing_disabled) |
2779 | return -EINVAL; | 3404 | return -EINVAL; |
2780 | 3405 | ||
2781 | if (cnt > TRACE_BUF_SIZE) | 3406 | if (cnt > TRACE_BUF_SIZE) |
@@ -2841,22 +3466,38 @@ static struct file_operations tracing_mark_fops = { | |||
2841 | 3466 | ||
2842 | #ifdef CONFIG_DYNAMIC_FTRACE | 3467 | #ifdef CONFIG_DYNAMIC_FTRACE |
2843 | 3468 | ||
3469 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | ||
3470 | { | ||
3471 | return 0; | ||
3472 | } | ||
3473 | |||
2844 | static ssize_t | 3474 | static ssize_t |
2845 | tracing_read_long(struct file *filp, char __user *ubuf, | 3475 | tracing_read_dyn_info(struct file *filp, char __user *ubuf, |
2846 | size_t cnt, loff_t *ppos) | 3476 | size_t cnt, loff_t *ppos) |
2847 | { | 3477 | { |
3478 | static char ftrace_dyn_info_buffer[1024]; | ||
3479 | static DEFINE_MUTEX(dyn_info_mutex); | ||
2848 | unsigned long *p = filp->private_data; | 3480 | unsigned long *p = filp->private_data; |
2849 | char buf[64]; | 3481 | char *buf = ftrace_dyn_info_buffer; |
3482 | int size = ARRAY_SIZE(ftrace_dyn_info_buffer); | ||
2850 | int r; | 3483 | int r; |
2851 | 3484 | ||
2852 | r = sprintf(buf, "%ld\n", *p); | 3485 | mutex_lock(&dyn_info_mutex); |
3486 | r = sprintf(buf, "%ld ", *p); | ||
2853 | 3487 | ||
2854 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 3488 | r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r); |
3489 | buf[r++] = '\n'; | ||
3490 | |||
3491 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
3492 | |||
3493 | mutex_unlock(&dyn_info_mutex); | ||
3494 | |||
3495 | return r; | ||
2855 | } | 3496 | } |
2856 | 3497 | ||
2857 | static struct file_operations tracing_read_long_fops = { | 3498 | static struct file_operations tracing_dyn_info_fops = { |
2858 | .open = tracing_open_generic, | 3499 | .open = tracing_open_generic, |
2859 | .read = tracing_read_long, | 3500 | .read = tracing_read_dyn_info, |
2860 | }; | 3501 | }; |
2861 | #endif | 3502 | #endif |
2862 | 3503 | ||
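
The __weak stub lets an architecture append its own text to dyn_ftrace_total_info without any new #ifdef: tracing_read_dyn_info() prints the update count, hands the remainder of the buffer to the hook (reserving the last byte for the newline it adds itself), and serializes everything behind dyn_info_mutex. A hypothetical override (the counter name is invented):

    /* arch-side override; the returned length is appended after
     * the "%ld " count that tracing_read_dyn_info() prints */
    int ftrace_arch_read_dyn_info(char *buf, int size)
    {
    	return snprintf(buf, size, "patched call sites: %u",
    			arch_patched_site_count);
    }
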
@@ -2897,10 +3538,10 @@ static __init int tracer_init_debugfs(void) | |||
2897 | if (!entry) | 3538 | if (!entry) |
2898 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); | 3539 | pr_warning("Could not create debugfs 'tracing_enabled' entry\n"); |
2899 | 3540 | ||
2900 | entry = debugfs_create_file("iter_ctrl", 0644, d_tracer, | 3541 | entry = debugfs_create_file("trace_options", 0644, d_tracer, |
2901 | NULL, &tracing_iter_fops); | 3542 | NULL, &tracing_iter_fops); |
2902 | if (!entry) | 3543 | if (!entry) |
2903 | pr_warning("Could not create debugfs 'iter_ctrl' entry\n"); | 3544 | pr_warning("Could not create debugfs 'trace_options' entry\n"); |
2904 | 3545 | ||
2905 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3546 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, |
2906 | NULL, &tracing_cpumask_fops); | 3547 | NULL, &tracing_cpumask_fops); |
@@ -2950,11 +3591,11 @@ static __init int tracer_init_debugfs(void) | |||
2950 | pr_warning("Could not create debugfs " | 3591 | pr_warning("Could not create debugfs " |
2951 | "'trace_pipe' entry\n"); | 3592 | "'trace_pipe' entry\n"); |
2952 | 3593 | ||
2953 | entry = debugfs_create_file("trace_entries", 0644, d_tracer, | 3594 | entry = debugfs_create_file("buffer_size_kb", 0644, d_tracer, |
2954 | &global_trace, &tracing_entries_fops); | 3595 | &global_trace, &tracing_entries_fops); |
2955 | if (!entry) | 3596 | if (!entry) |
2956 | pr_warning("Could not create debugfs " | 3597 | pr_warning("Could not create debugfs " |
2957 | "'trace_entries' entry\n"); | 3598 | "'buffer_size_kb' entry\n"); |
2958 | 3599 | ||
2959 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, | 3600 | entry = debugfs_create_file("trace_marker", 0220, d_tracer, |
2960 | NULL, &tracing_mark_fops); | 3601 | NULL, &tracing_mark_fops); |
@@ -2965,7 +3606,7 @@ static __init int tracer_init_debugfs(void) | |||
2965 | #ifdef CONFIG_DYNAMIC_FTRACE | 3606 | #ifdef CONFIG_DYNAMIC_FTRACE |
2966 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 3607 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
2967 | &ftrace_update_tot_cnt, | 3608 | &ftrace_update_tot_cnt, |
2968 | &tracing_read_long_fops); | 3609 | &tracing_dyn_info_fops); |
2969 | if (!entry) | 3610 | if (!entry) |
2970 | pr_warning("Could not create debugfs " | 3611 | pr_warning("Could not create debugfs " |
2971 | "'dyn_ftrace_total_info' entry\n"); | 3612 | "'dyn_ftrace_total_info' entry\n"); |
@@ -2976,7 +3617,7 @@ static __init int tracer_init_debugfs(void) | |||
2976 | return 0; | 3617 | return 0; |
2977 | } | 3618 | } |
2978 | 3619 | ||
2979 | int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | 3620 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) |
2980 | { | 3621 | { |
2981 | static DEFINE_SPINLOCK(trace_buf_lock); | 3622 | static DEFINE_SPINLOCK(trace_buf_lock); |
2982 | static char trace_buf[TRACE_BUF_SIZE]; | 3623 | static char trace_buf[TRACE_BUF_SIZE]; |
@@ -2984,11 +3625,11 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
2984 | struct ring_buffer_event *event; | 3625 | struct ring_buffer_event *event; |
2985 | struct trace_array *tr = &global_trace; | 3626 | struct trace_array *tr = &global_trace; |
2986 | struct trace_array_cpu *data; | 3627 | struct trace_array_cpu *data; |
2987 | struct print_entry *entry; | ||
2988 | unsigned long flags, irq_flags; | ||
2989 | int cpu, len = 0, size, pc; | 3628 | int cpu, len = 0, size, pc; |
3629 | struct print_entry *entry; | ||
3630 | unsigned long irq_flags; | ||
2990 | 3631 | ||
2991 | if (!tr->ctrl || tracing_disabled) | 3632 | if (tracing_disabled || tracing_selftest_running) |
2992 | return 0; | 3633 | return 0; |
2993 | 3634 | ||
2994 | pc = preempt_count(); | 3635 | pc = preempt_count(); |
@@ -2999,7 +3640,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
2999 | if (unlikely(atomic_read(&data->disabled))) | 3640 | if (unlikely(atomic_read(&data->disabled))) |
3000 | goto out; | 3641 | goto out; |
3001 | 3642 | ||
3002 | spin_lock_irqsave(&trace_buf_lock, flags); | 3643 | pause_graph_tracing(); |
3644 | spin_lock_irqsave(&trace_buf_lock, irq_flags); | ||
3003 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 3645 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
3004 | 3646 | ||
3005 | len = min(len, TRACE_BUF_SIZE-1); | 3647 | len = min(len, TRACE_BUF_SIZE-1); |
@@ -3010,17 +3652,18 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args) | |||
3010 | if (!event) | 3652 | if (!event) |
3011 | goto out_unlock; | 3653 | goto out_unlock; |
3012 | entry = ring_buffer_event_data(event); | 3654 | entry = ring_buffer_event_data(event); |
3013 | tracing_generic_entry_update(&entry->ent, flags, pc); | 3655 | tracing_generic_entry_update(&entry->ent, irq_flags, pc); |
3014 | entry->ent.type = TRACE_PRINT; | 3656 | entry->ent.type = TRACE_PRINT; |
3015 | entry->ip = ip; | 3657 | entry->ip = ip; |
3658 | entry->depth = depth; | ||
3016 | 3659 | ||
3017 | memcpy(&entry->buf, trace_buf, len); | 3660 | memcpy(&entry->buf, trace_buf, len); |
3018 | entry->buf[len] = 0; | 3661 | entry->buf[len] = 0; |
3019 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 3662 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
3020 | 3663 | ||
3021 | out_unlock: | 3664 | out_unlock: |
3022 | spin_unlock_irqrestore(&trace_buf_lock, flags); | 3665 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); |
3023 | 3666 | unpause_graph_tracing(); | |
3024 | out: | 3667 | out: |
3025 | preempt_enable_notrace(); | 3668 | preempt_enable_notrace(); |
3026 | 3669 | ||
@@ -3037,7 +3680,7 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) | |||
3037 | return 0; | 3680 | return 0; |
3038 | 3681 | ||
3039 | va_start(ap, fmt); | 3682 | va_start(ap, fmt); |
3040 | ret = trace_vprintk(ip, fmt, ap); | 3683 | ret = trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); |
3041 | va_end(ap); | 3684 | va_end(ap); |
3042 | return ret; | 3685 | return ret; |
3043 | } | 3686 | } |
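
trace_vprintk() now takes a depth argument so print entries can later be aligned with function-graph nesting; __ftrace_printk() fills it from task_curr_ret_stack(), while mark_printk() above passes -1 for writes arriving through trace_marker. Call sites are unchanged, for example (an illustrative caller):

    static irqreturn_t my_irq_handler(int irq, void *dev)
    {
    	ftrace_printk("my_irq_handler: irq=%d\n", irq);
    	return IRQ_HANDLED;
    }
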
@@ -3046,7 +3689,8 @@ EXPORT_SYMBOL_GPL(__ftrace_printk); | |||
3046 | static int trace_panic_handler(struct notifier_block *this, | 3689 | static int trace_panic_handler(struct notifier_block *this, |
3047 | unsigned long event, void *unused) | 3690 | unsigned long event, void *unused) |
3048 | { | 3691 | { |
3049 | ftrace_dump(); | 3692 | if (ftrace_dump_on_oops) |
3693 | ftrace_dump(); | ||
3050 | return NOTIFY_OK; | 3694 | return NOTIFY_OK; |
3051 | } | 3695 | } |
3052 | 3696 | ||
@@ -3062,7 +3706,8 @@ static int trace_die_handler(struct notifier_block *self, | |||
3062 | { | 3706 | { |
3063 | switch (val) { | 3707 | switch (val) { |
3064 | case DIE_OOPS: | 3708 | case DIE_OOPS: |
3065 | ftrace_dump(); | 3709 | if (ftrace_dump_on_oops) |
3710 | ftrace_dump(); | ||
3066 | break; | 3711 | break; |
3067 | default: | 3712 | default: |
3068 | break; | 3713 | break; |
@@ -3103,7 +3748,6 @@ trace_printk_seq(struct trace_seq *s) | |||
3103 | trace_seq_reset(s); | 3748 | trace_seq_reset(s); |
3104 | } | 3749 | } |
3105 | 3750 | ||
3106 | |||
3107 | void ftrace_dump(void) | 3751 | void ftrace_dump(void) |
3108 | { | 3752 | { |
3109 | static DEFINE_SPINLOCK(ftrace_dump_lock); | 3753 | static DEFINE_SPINLOCK(ftrace_dump_lock); |
@@ -3128,6 +3772,9 @@ void ftrace_dump(void) | |||
3128 | atomic_inc(&global_trace.data[cpu]->disabled); | 3772 | atomic_inc(&global_trace.data[cpu]->disabled); |
3129 | } | 3773 | } |
3130 | 3774 | ||
3775 | /* don't look at user memory in panic mode */ | ||
3776 | trace_flags &= ~TRACE_ITER_SYM_USEROBJ; | ||
3777 | |||
3131 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 3778 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); |
3132 | 3779 | ||
3133 | iter.tr = &global_trace; | 3780 | iter.tr = &global_trace; |
@@ -3221,7 +3868,6 @@ __init static int tracer_alloc_buffers(void) | |||
3221 | #endif | 3868 | #endif |
3222 | 3869 | ||
3223 | /* All seems OK, enable tracing */ | 3870 | /* All seems OK, enable tracing */ |
3224 | global_trace.ctrl = tracer_enabled; | ||
3225 | tracing_disabled = 0; | 3871 | tracing_disabled = 0; |
3226 | 3872 | ||
3227 | atomic_notifier_chain_register(&panic_notifier_list, | 3873 | atomic_notifier_chain_register(&panic_notifier_list, |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 8465ad052707..cc7a4f864036 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/ring_buffer.h> | 8 | #include <linux/ring_buffer.h> |
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <trace/boot.h> | ||
11 | 12 | ||
12 | enum trace_type { | 13 | enum trace_type { |
13 | __TRACE_FIRST_TYPE = 0, | 14 | __TRACE_FIRST_TYPE = 0, |
@@ -21,7 +22,14 @@ enum trace_type { | |||
21 | TRACE_SPECIAL, | 22 | TRACE_SPECIAL, |
22 | TRACE_MMIO_RW, | 23 | TRACE_MMIO_RW, |
23 | TRACE_MMIO_MAP, | 24 | TRACE_MMIO_MAP, |
24 | TRACE_BOOT, | 25 | TRACE_BRANCH, |
26 | TRACE_BOOT_CALL, | ||
27 | TRACE_BOOT_RET, | ||
28 | TRACE_GRAPH_RET, | ||
29 | TRACE_GRAPH_ENT, | ||
30 | TRACE_USER_STACK, | ||
31 | TRACE_HW_BRANCHES, | ||
32 | TRACE_POWER, | ||
25 | 33 | ||
26 | __TRACE_LAST_TYPE | 34 | __TRACE_LAST_TYPE |
27 | }; | 35 | }; |
@@ -38,6 +46,7 @@ struct trace_entry { | |||
38 | unsigned char flags; | 46 | unsigned char flags; |
39 | unsigned char preempt_count; | 47 | unsigned char preempt_count; |
40 | int pid; | 48 | int pid; |
49 | int tgid; | ||
41 | }; | 50 | }; |
42 | 51 | ||
43 | /* | 52 | /* |
@@ -48,6 +57,18 @@ struct ftrace_entry { | |||
48 | unsigned long ip; | 57 | unsigned long ip; |
49 | unsigned long parent_ip; | 58 | unsigned long parent_ip; |
50 | }; | 59 | }; |
60 | |||
61 | /* Function call entry */ | ||
62 | struct ftrace_graph_ent_entry { | ||
63 | struct trace_entry ent; | ||
64 | struct ftrace_graph_ent graph_ent; | ||
65 | }; | ||
66 | |||
67 | /* Function return entry */ | ||
68 | struct ftrace_graph_ret_entry { | ||
69 | struct trace_entry ent; | ||
70 | struct ftrace_graph_ret ret; | ||
71 | }; | ||
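
Each graph entry/return event embeds a small record from <linux/ftrace.h>; for reference, a sketch of those types as this series defines them (the field set is recalled from that series, not part of this diff):

    struct ftrace_graph_ent {
    	unsigned long func;		/* address of the traced function */
    	int depth;			/* call nesting depth */
    };

    struct ftrace_graph_ret {
    	unsigned long func;
    	unsigned long long calltime;	/* timestamp at function entry */
    	unsigned long long rettime;	/* timestamp at function return */
    	unsigned long overrun;		/* return-stack depth overruns */
    	int depth;
    };
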
51 | extern struct tracer boot_tracer; | 72 | extern struct tracer boot_tracer; |
52 | 73 | ||
53 | /* | 74 | /* |
@@ -85,12 +106,18 @@ struct stack_entry { | |||
85 | unsigned long caller[FTRACE_STACK_ENTRIES]; | 106 | unsigned long caller[FTRACE_STACK_ENTRIES]; |
86 | }; | 107 | }; |
87 | 108 | ||
109 | struct userstack_entry { | ||
110 | struct trace_entry ent; | ||
111 | unsigned long caller[FTRACE_STACK_ENTRIES]; | ||
112 | }; | ||
113 | |||
88 | /* | 114 | /* |
89 | * ftrace_printk entry: | 115 | * ftrace_printk entry: |
90 | */ | 116 | */ |
91 | struct print_entry { | 117 | struct print_entry { |
92 | struct trace_entry ent; | 118 | struct trace_entry ent; |
93 | unsigned long ip; | 119 | unsigned long ip; |
120 | int depth; | ||
94 | char buf[]; | 121 | char buf[]; |
95 | }; | 122 | }; |
96 | 123 | ||
@@ -112,9 +139,35 @@ struct trace_mmiotrace_map { | |||
112 | struct mmiotrace_map map; | 139 | struct mmiotrace_map map; |
113 | }; | 140 | }; |
114 | 141 | ||
115 | struct trace_boot { | 142 | struct trace_boot_call { |
116 | struct trace_entry ent; | 143 | struct trace_entry ent; |
117 | struct boot_trace initcall; | 144 | struct boot_trace_call boot_call; |
145 | }; | ||
146 | |||
147 | struct trace_boot_ret { | ||
148 | struct trace_entry ent; | ||
149 | struct boot_trace_ret boot_ret; | ||
150 | }; | ||
151 | |||
152 | #define TRACE_FUNC_SIZE 30 | ||
153 | #define TRACE_FILE_SIZE 20 | ||
154 | struct trace_branch { | ||
155 | struct trace_entry ent; | ||
156 | unsigned line; | ||
157 | char func[TRACE_FUNC_SIZE+1]; | ||
158 | char file[TRACE_FILE_SIZE+1]; | ||
159 | char correct; | ||
160 | }; | ||
161 | |||
162 | struct hw_branch_entry { | ||
163 | struct trace_entry ent; | ||
164 | u64 from; | ||
165 | u64 to; | ||
166 | }; | ||
167 | |||
168 | struct trace_power { | ||
169 | struct trace_entry ent; | ||
170 | struct power_trace state_data; | ||
118 | }; | 171 | }; |
119 | 172 | ||
120 | /* | 173 | /* |
@@ -172,7 +225,6 @@ struct trace_iterator; | |||
172 | struct trace_array { | 225 | struct trace_array { |
173 | struct ring_buffer *buffer; | 226 | struct ring_buffer *buffer; |
174 | unsigned long entries; | 227 | unsigned long entries; |
175 | long ctrl; | ||
176 | int cpu; | 228 | int cpu; |
177 | cycle_t time_start; | 229 | cycle_t time_start; |
178 | struct task_struct *waiter; | 230 | struct task_struct *waiter; |
@@ -212,13 +264,22 @@ extern void __ftrace_bad_type(void); | |||
212 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ | 264 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ |
213 | IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \ | 265 | IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \ |
214 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ | 266 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ |
267 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ | ||
215 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ | 268 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
216 | IF_ASSIGN(var, ent, struct special_entry, 0); \ | 269 | IF_ASSIGN(var, ent, struct special_entry, 0); \ |
217 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ | 270 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ |
218 | TRACE_MMIO_RW); \ | 271 | TRACE_MMIO_RW); \ |
219 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ | 272 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ |
220 | TRACE_MMIO_MAP); \ | 273 | TRACE_MMIO_MAP); \ |
221 | IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \ | 274 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ |
275 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ | ||
276 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ | ||
277 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ | ||
278 | TRACE_GRAPH_ENT); \ | ||
279 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | ||
280 | TRACE_GRAPH_RET); \ | ||
281 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ | ||
282 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | ||
222 | __ftrace_bad_type(); \ | 283 | __ftrace_bad_type(); \ |
223 | } while (0) | 284 | } while (0) |
224 | 285 | ||
@@ -229,29 +290,56 @@ enum print_line_t { | |||
229 | TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */ | 290 | TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */ |
230 | }; | 291 | }; |
231 | 292 | ||
293 | |||
294 | /* | ||
295 | * An option specific to a tracer. This is a boolean value. | ||
296 | * The bit is the mask that selects this option's value in | ||
297 | * the flags val field of struct tracer_flags. | ||
298 | */ | ||
299 | struct tracer_opt { | ||
300 | const char *name; /* Will appear in the trace_options file */ | ||
301 | u32 bit; /* Mask assigned in val field in tracer_flags */ | ||
302 | }; | ||
303 | |||
304 | /* | ||
305 | * The set of specific options for a tracer. Your tracer | ||
306 | * has to set the initial value of the flags val. | ||
307 | */ | ||
308 | struct tracer_flags { | ||
309 | u32 val; | ||
310 | struct tracer_opt *opts; | ||
311 | }; | ||
312 | |||
313 | /* Makes it easier to define a tracer opt */ | ||
314 | #define TRACER_OPT(s, b) .name = #s, .bit = b | ||
315 | |||
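A tracer wires these together by declaring an option table terminated by an empty entry, plus a flags struct carrying the default mask; the function_graph tracer later in this patch does exactly that. A minimal sketch (my_opts, my_flags and the 0x1 mask are illustrative, not taken from the patch):

        /* One boolean option, enabled by default */
        static struct tracer_opt my_opts[] = {
                /* "myopt" will show up in the trace_options file */
                { TRACER_OPT(myopt, 0x1) },
                { } /* Empty entry terminates the list */
        };

        static struct tracer_flags my_flags = {
                .val    = 0x1,          /* initial value of the option bits */
                .opts   = my_opts,
        };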
232 | /* | 316 | /* |
233 | * A specific tracer, represented by methods that operate on a trace array: | 317 | * A specific tracer, represented by methods that operate on a trace array: |
234 | */ | 318 | */ |
235 | struct tracer { | 319 | struct tracer { |
236 | const char *name; | 320 | const char *name; |
237 | void (*init)(struct trace_array *tr); | 321 | /* Your tracer should raise a warning if init fails */ |
322 | int (*init)(struct trace_array *tr); | ||
238 | void (*reset)(struct trace_array *tr); | 323 | void (*reset)(struct trace_array *tr); |
324 | void (*start)(struct trace_array *tr); | ||
325 | void (*stop)(struct trace_array *tr); | ||
239 | void (*open)(struct trace_iterator *iter); | 326 | void (*open)(struct trace_iterator *iter); |
240 | void (*pipe_open)(struct trace_iterator *iter); | 327 | void (*pipe_open)(struct trace_iterator *iter); |
241 | void (*close)(struct trace_iterator *iter); | 328 | void (*close)(struct trace_iterator *iter); |
242 | void (*start)(struct trace_iterator *iter); | ||
243 | void (*stop)(struct trace_iterator *iter); | ||
244 | ssize_t (*read)(struct trace_iterator *iter, | 329 | ssize_t (*read)(struct trace_iterator *iter, |
245 | struct file *filp, char __user *ubuf, | 330 | struct file *filp, char __user *ubuf, |
246 | size_t cnt, loff_t *ppos); | 331 | size_t cnt, loff_t *ppos); |
247 | void (*ctrl_update)(struct trace_array *tr); | ||
248 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 332 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
249 | int (*selftest)(struct tracer *trace, | 333 | int (*selftest)(struct tracer *trace, |
250 | struct trace_array *tr); | 334 | struct trace_array *tr); |
251 | #endif | 335 | #endif |
336 | void (*print_header)(struct seq_file *m); | ||
252 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 337 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
338 | /* If you handled the flag setting, return 0 */ | ||
339 | int (*set_flag)(u32 old_flags, u32 bit, int set); | ||
253 | struct tracer *next; | 340 | struct tracer *next; |
254 | int print_max; | 341 | int print_max; |
342 | struct tracer_flags *flags; | ||
255 | }; | 343 | }; |
256 | 344 | ||
257 | struct trace_seq { | 345 | struct trace_seq { |
@@ -279,10 +367,14 @@ struct trace_iterator { | |||
279 | unsigned long iter_flags; | 367 | unsigned long iter_flags; |
280 | loff_t pos; | 368 | loff_t pos; |
281 | long idx; | 369 | long idx; |
370 | |||
371 | cpumask_t started; | ||
282 | }; | 372 | }; |
283 | 373 | ||
374 | int tracing_is_enabled(void); | ||
284 | void trace_wake_up(void); | 375 | void trace_wake_up(void); |
285 | void tracing_reset(struct trace_array *tr, int cpu); | 376 | void tracing_reset(struct trace_array *tr, int cpu); |
377 | void tracing_reset_online_cpus(struct trace_array *tr); | ||
286 | int tracing_open_generic(struct inode *inode, struct file *filp); | 378 | int tracing_open_generic(struct inode *inode, struct file *filp); |
287 | struct dentry *tracing_init_dentry(void); | 379 | struct dentry *tracing_init_dentry(void); |
288 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | 380 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); |
@@ -321,8 +413,15 @@ void trace_function(struct trace_array *tr, | |||
321 | unsigned long parent_ip, | 413 | unsigned long parent_ip, |
322 | unsigned long flags, int pc); | 414 | unsigned long flags, int pc); |
323 | 415 | ||
416 | void trace_graph_return(struct ftrace_graph_ret *trace); | ||
417 | int trace_graph_entry(struct ftrace_graph_ent *trace); | ||
418 | void trace_hw_branch(struct trace_array *tr, u64 from, u64 to); | ||
419 | |||
324 | void tracing_start_cmdline_record(void); | 420 | void tracing_start_cmdline_record(void); |
325 | void tracing_stop_cmdline_record(void); | 421 | void tracing_stop_cmdline_record(void); |
422 | void tracing_sched_switch_assign_trace(struct trace_array *tr); | ||
423 | void tracing_stop_sched_switch_record(void); | ||
424 | void tracing_start_sched_switch_record(void); | ||
326 | int register_tracer(struct tracer *type); | 425 | int register_tracer(struct tracer *type); |
327 | void unregister_tracer(struct tracer *type); | 426 | void unregister_tracer(struct tracer *type); |
328 | 427 | ||
@@ -358,6 +457,7 @@ struct tracer_switch_ops { | |||
358 | struct tracer_switch_ops *next; | 457 | struct tracer_switch_ops *next; |
359 | }; | 458 | }; |
360 | 459 | ||
460 | char *trace_find_cmdline(int pid); | ||
361 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 461 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
362 | 462 | ||
363 | #ifdef CONFIG_DYNAMIC_FTRACE | 463 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -383,19 +483,79 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace, | |||
383 | struct trace_array *tr); | 483 | struct trace_array *tr); |
384 | extern int trace_selftest_startup_sysprof(struct tracer *trace, | 484 | extern int trace_selftest_startup_sysprof(struct tracer *trace, |
385 | struct trace_array *tr); | 485 | struct trace_array *tr); |
486 | extern int trace_selftest_startup_branch(struct tracer *trace, | ||
487 | struct trace_array *tr); | ||
386 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 488 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
387 | 489 | ||
388 | extern void *head_page(struct trace_array_cpu *data); | 490 | extern void *head_page(struct trace_array_cpu *data); |
389 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | 491 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); |
390 | extern void trace_seq_print_cont(struct trace_seq *s, | 492 | extern void trace_seq_print_cont(struct trace_seq *s, |
391 | struct trace_iterator *iter); | 493 | struct trace_iterator *iter); |
494 | |||
495 | extern int | ||
496 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | ||
497 | unsigned long sym_flags); | ||
392 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | 498 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, |
393 | size_t cnt); | 499 | size_t cnt); |
394 | extern long ns2usecs(cycle_t nsec); | 500 | extern long ns2usecs(cycle_t nsec); |
395 | extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args); | 501 | extern int |
502 | trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); | ||
396 | 503 | ||
397 | extern unsigned long trace_flags; | 504 | extern unsigned long trace_flags; |
398 | 505 | ||
506 | /* Standard output formatting function used for function return traces */ | ||
507 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
508 | extern enum print_line_t print_graph_function(struct trace_iterator *iter); | ||
509 | |||
510 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
511 | /* TODO: make this variable */ | ||
512 | #define FTRACE_GRAPH_MAX_FUNCS 32 | ||
513 | extern int ftrace_graph_count; | ||
514 | extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS]; | ||
515 | |||
516 | static inline int ftrace_graph_addr(unsigned long addr) | ||
517 | { | ||
518 | int i; | ||
519 | |||
520 | if (!ftrace_graph_count || test_tsk_trace_graph(current)) | ||
521 | return 1; | ||
522 | |||
523 | for (i = 0; i < ftrace_graph_count; i++) { | ||
524 | if (addr == ftrace_graph_funcs[i]) | ||
525 | return 1; | ||
526 | } | ||
527 | |||
528 | return 0; | ||
529 | } | ||
530 | #else | ||
531 | static inline int ftrace_trace_addr(unsigned long addr) | ||
532 | { | ||
533 | return 1; | ||
534 | } | ||
535 | static inline int ftrace_graph_addr(unsigned long addr) | ||
536 | { | ||
537 | return 1; | ||
538 | } | ||
539 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
540 | |||
541 | #else /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
542 | static inline enum print_line_t | ||
543 | print_graph_function(struct trace_iterator *iter) | ||
544 | { | ||
545 | return TRACE_TYPE_UNHANDLED; | ||
546 | } | ||
547 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
548 | |||
549 | extern struct pid *ftrace_pid_trace; | ||
550 | |||
551 | static inline int ftrace_trace_task(struct task_struct *task) | ||
552 | { | ||
553 | if (!ftrace_pid_trace) | ||
554 | return 1; | ||
555 | |||
556 | return test_tsk_trace_trace(task); | ||
557 | } | ||
558 | |||
399 | /* | 559 | /* |
400 | * trace_iterator_flags is an enumeration that defines bit | 560 | * trace_iterator_flags is an enumeration that defines bit |
401 | * positions into trace_flags that controls the output. | 561 | * positions into trace_flags that controls the output. |
@@ -415,8 +575,93 @@ enum trace_iterator_flags { | |||
415 | TRACE_ITER_STACKTRACE = 0x100, | 575 | TRACE_ITER_STACKTRACE = 0x100, |
416 | TRACE_ITER_SCHED_TREE = 0x200, | 576 | TRACE_ITER_SCHED_TREE = 0x200, |
417 | TRACE_ITER_PRINTK = 0x400, | 577 | TRACE_ITER_PRINTK = 0x400, |
578 | TRACE_ITER_PREEMPTONLY = 0x800, | ||
579 | TRACE_ITER_BRANCH = 0x1000, | ||
580 | TRACE_ITER_ANNOTATE = 0x2000, | ||
581 | TRACE_ITER_USERSTACKTRACE = 0x4000, | ||
582 | TRACE_ITER_SYM_USEROBJ = 0x8000, | ||
583 | TRACE_ITER_PRINTK_MSGONLY = 0x10000 | ||
418 | }; | 584 | }; |
419 | 585 | ||
586 | /* | ||
587 | * TRACE_ITER_SYM_MASK masks the options in trace_flags that | ||
588 | * control the output of kernel symbols. | ||
589 | */ | ||
590 | #define TRACE_ITER_SYM_MASK \ | ||
591 | (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR) | ||
592 | |||
420 | extern struct tracer nop_trace; | 593 | extern struct tracer nop_trace; |
421 | 594 | ||
595 | /** | ||
596 | * ftrace_preempt_disable - disable preemption scheduler safe | ||
597 | * | ||
598 | * When tracing can happen inside the scheduler, there exist | ||
599 | * cases where the tracing might happen before the need_resched | ||
600 | * flag is checked. If this happens and the tracer calls | ||
601 | * preempt_enable (after a disable), a schedule might take place | ||
602 | * causing an infinite recursion. | ||
603 | * | ||
604 | * To prevent this, we read the need_resched flag before | ||
605 | * disabling preemption. When we want to enable preemption we | ||
606 | * check the flag, if it is set, then we call preempt_enable_no_resched. | ||
607 | * Otherwise, we call preempt_enable. | ||
608 | * | ||
609 | * The rationale for doing the above is that if need_resched is set | ||
610 | * and we have yet to reschedule, we are either in an atomic location | ||
611 | * (where we do not need to check for scheduling) or we are inside | ||
612 | * the scheduler and do not want to resched. | ||
613 | */ | ||
614 | static inline int ftrace_preempt_disable(void) | ||
615 | { | ||
616 | int resched; | ||
617 | |||
618 | resched = need_resched(); | ||
619 | preempt_disable_notrace(); | ||
620 | |||
621 | return resched; | ||
622 | } | ||
623 | |||
624 | /** | ||
625 | * ftrace_preempt_enable - enable preemption scheduler safe | ||
626 | * @resched: the return value from ftrace_preempt_disable | ||
627 | * | ||
628 | * This is a scheduler safe way to enable preemption and not miss | ||
629 | * any preemption checks. The disable call saved the preemption state. | ||
630 | * If resched is set, then we were either inside an atomic or | ||
631 | * are inside the scheduler (we would have already scheduled | ||
632 | * otherwise). In this case, we do not want to call normal | ||
633 | * preempt_enable, but preempt_enable_no_resched instead. | ||
634 | */ | ||
635 | static inline void ftrace_preempt_enable(int resched) | ||
636 | { | ||
637 | if (resched) | ||
638 | preempt_enable_no_resched_notrace(); | ||
639 | else | ||
640 | preempt_enable_notrace(); | ||
641 | } | ||
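A probe that can fire inside the scheduler wraps its buffer writes in this pair. A minimal sketch of the calling pattern (my_trace_probe is an illustrative name):

        static void my_trace_probe(void)
        {
                int resched;

                /* Save the need_resched state and disable preemption
                 * without recursing into the tracer. */
                resched = ftrace_preempt_disable();

                /* ... reserve and commit a ring buffer event here ... */

                /* Re-enable preemption the scheduler-safe way. */
                ftrace_preempt_enable(resched);
        }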
642 | |||
643 | #ifdef CONFIG_BRANCH_TRACER | ||
644 | extern int enable_branch_tracing(struct trace_array *tr); | ||
645 | extern void disable_branch_tracing(void); | ||
646 | static inline int trace_branch_enable(struct trace_array *tr) | ||
647 | { | ||
648 | if (trace_flags & TRACE_ITER_BRANCH) | ||
649 | return enable_branch_tracing(tr); | ||
650 | return 0; | ||
651 | } | ||
652 | static inline void trace_branch_disable(void) | ||
653 | { | ||
654 | /* due to races, always disable */ | ||
655 | disable_branch_tracing(); | ||
656 | } | ||
657 | #else | ||
658 | static inline int trace_branch_enable(struct trace_array *tr) | ||
659 | { | ||
660 | return 0; | ||
661 | } | ||
662 | static inline void trace_branch_disable(void) | ||
663 | { | ||
664 | } | ||
665 | #endif /* CONFIG_BRANCH_TRACER */ | ||
666 | |||
422 | #endif /* _LINUX_KERNEL_TRACE_H */ | 667 | #endif /* _LINUX_KERNEL_TRACE_H */ |
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index d0a5e50eeff2..3ccebde28482 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -13,101 +13,161 @@ | |||
13 | #include "trace.h" | 13 | #include "trace.h" |
14 | 14 | ||
15 | static struct trace_array *boot_trace; | 15 | static struct trace_array *boot_trace; |
16 | static int trace_boot_enabled; | 16 | static bool pre_initcalls_finished; |
17 | 17 | ||
18 | 18 | /* Tells the boot tracer that the pre_smp_initcalls are finished. | |
19 | /* Should be started after do_pre_smp_initcalls() in init/main.c */ | 19 | * So we are ready. |
20 | * It doesn't enable sched events tracing, however; | ||
21 | * you have to call enable_boot_trace() to do so. | ||
22 | */ | ||
20 | void start_boot_trace(void) | 23 | void start_boot_trace(void) |
21 | { | 24 | { |
22 | trace_boot_enabled = 1; | 25 | pre_initcalls_finished = true; |
23 | } | 26 | } |
24 | 27 | ||
25 | void stop_boot_trace(void) | 28 | void enable_boot_trace(void) |
26 | { | 29 | { |
27 | trace_boot_enabled = 0; | 30 | if (pre_initcalls_finished) |
31 | tracing_start_sched_switch_record(); | ||
28 | } | 32 | } |
29 | 33 | ||
30 | void reset_boot_trace(struct trace_array *tr) | 34 | void disable_boot_trace(void) |
31 | { | 35 | { |
32 | stop_boot_trace(); | 36 | if (pre_initcalls_finished) |
37 | tracing_stop_sched_switch_record(); | ||
33 | } | 38 | } |
34 | 39 | ||
35 | static void boot_trace_init(struct trace_array *tr) | 40 | static int boot_trace_init(struct trace_array *tr) |
36 | { | 41 | { |
37 | int cpu; | 42 | int cpu; |
38 | boot_trace = tr; | 43 | boot_trace = tr; |
39 | 44 | ||
40 | trace_boot_enabled = 0; | ||
41 | |||
42 | for_each_cpu_mask(cpu, cpu_possible_map) | 45 | for_each_cpu_mask(cpu, cpu_possible_map) |
43 | tracing_reset(tr, cpu); | 46 | tracing_reset(tr, cpu); |
47 | |||
48 | tracing_sched_switch_assign_trace(tr); | ||
49 | return 0; | ||
44 | } | 50 | } |
45 | 51 | ||
46 | static void boot_trace_ctrl_update(struct trace_array *tr) | 52 | static enum print_line_t |
53 | initcall_call_print_line(struct trace_iterator *iter) | ||
47 | { | 54 | { |
48 | if (tr->ctrl) | 55 | struct trace_entry *entry = iter->ent; |
49 | start_boot_trace(); | 56 | struct trace_seq *s = &iter->seq; |
57 | struct trace_boot_call *field; | ||
58 | struct boot_trace_call *call; | ||
59 | u64 ts; | ||
60 | unsigned long nsec_rem; | ||
61 | int ret; | ||
62 | |||
63 | trace_assign_type(field, entry); | ||
64 | call = &field->boot_call; | ||
65 | ts = iter->ts; | ||
66 | nsec_rem = do_div(ts, 1000000000); | ||
67 | |||
68 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | ||
69 | (unsigned long)ts, nsec_rem, call->func, call->caller); | ||
70 | |||
71 | if (!ret) | ||
72 | return TRACE_TYPE_PARTIAL_LINE; | ||
50 | else | 73 | else |
51 | stop_boot_trace(); | 74 | return TRACE_TYPE_HANDLED; |
52 | } | 75 | } |
53 | 76 | ||
54 | static enum print_line_t initcall_print_line(struct trace_iterator *iter) | 77 | static enum print_line_t |
78 | initcall_ret_print_line(struct trace_iterator *iter) | ||
55 | { | 79 | { |
56 | int ret; | ||
57 | struct trace_entry *entry = iter->ent; | 80 | struct trace_entry *entry = iter->ent; |
58 | struct trace_boot *field = (struct trace_boot *)entry; | ||
59 | struct boot_trace *it = &field->initcall; | ||
60 | struct trace_seq *s = &iter->seq; | 81 | struct trace_seq *s = &iter->seq; |
61 | struct timespec calltime = ktime_to_timespec(it->calltime); | 82 | struct trace_boot_ret *field; |
62 | struct timespec rettime = ktime_to_timespec(it->rettime); | 83 | struct boot_trace_ret *init_ret; |
63 | 84 | u64 ts; | |
64 | if (entry->type == TRACE_BOOT) { | 85 | unsigned long nsec_rem; |
65 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | 86 | int ret; |
66 | calltime.tv_sec, | 87 | |
67 | calltime.tv_nsec, | 88 | trace_assign_type(field, entry); |
68 | it->func, it->caller); | 89 | init_ret = &field->boot_ret; |
69 | if (!ret) | 90 | ts = iter->ts; |
70 | return TRACE_TYPE_PARTIAL_LINE; | 91 | nsec_rem = do_div(ts, 1000000000); |
71 | 92 | ||
72 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " | 93 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " |
73 | "returned %d after %lld msecs\n", | 94 | "returned %d after %llu msecs\n", |
74 | rettime.tv_sec, | 95 | (unsigned long) ts, |
75 | rettime.tv_nsec, | 96 | nsec_rem, |
76 | it->func, it->result, it->duration); | 97 | init_ret->func, init_ret->result, init_ret->duration); |
77 | 98 | ||
78 | if (!ret) | 99 | if (!ret) |
79 | return TRACE_TYPE_PARTIAL_LINE; | 100 | return TRACE_TYPE_PARTIAL_LINE; |
101 | else | ||
80 | return TRACE_TYPE_HANDLED; | 102 | return TRACE_TYPE_HANDLED; |
103 | } | ||
104 | |||
105 | static enum print_line_t initcall_print_line(struct trace_iterator *iter) | ||
106 | { | ||
107 | struct trace_entry *entry = iter->ent; | ||
108 | |||
109 | switch (entry->type) { | ||
110 | case TRACE_BOOT_CALL: | ||
111 | return initcall_call_print_line(iter); | ||
112 | case TRACE_BOOT_RET: | ||
113 | return initcall_ret_print_line(iter); | ||
114 | default: | ||
115 | return TRACE_TYPE_UNHANDLED; | ||
81 | } | 116 | } |
82 | return TRACE_TYPE_UNHANDLED; | ||
83 | } | 117 | } |
84 | 118 | ||
85 | struct tracer boot_tracer __read_mostly = | 119 | struct tracer boot_tracer __read_mostly = |
86 | { | 120 | { |
87 | .name = "initcall", | 121 | .name = "initcall", |
88 | .init = boot_trace_init, | 122 | .init = boot_trace_init, |
89 | .reset = reset_boot_trace, | 123 | .reset = tracing_reset_online_cpus, |
90 | .ctrl_update = boot_trace_ctrl_update, | ||
91 | .print_line = initcall_print_line, | 124 | .print_line = initcall_print_line, |
92 | }; | 125 | }; |
93 | 126 | ||
94 | void trace_boot(struct boot_trace *it, initcall_t fn) | 127 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) |
95 | { | 128 | { |
96 | struct ring_buffer_event *event; | 129 | struct ring_buffer_event *event; |
97 | struct trace_boot *entry; | 130 | struct trace_boot_call *entry; |
98 | struct trace_array_cpu *data; | ||
99 | unsigned long irq_flags; | 131 | unsigned long irq_flags; |
100 | struct trace_array *tr = boot_trace; | 132 | struct trace_array *tr = boot_trace; |
101 | 133 | ||
102 | if (!trace_boot_enabled) | 134 | if (!pre_initcalls_finished) |
103 | return; | 135 | return; |
104 | 136 | ||
105 | /* Get its name now since this function could | 137 | /* Get its name now since this function could |
106 | * disappear because it is in the .init section. | 138 | * disappear because it is in the .init section. |
107 | */ | 139 | */ |
108 | sprint_symbol(it->func, (unsigned long)fn); | 140 | sprint_symbol(bt->func, (unsigned long)fn); |
141 | preempt_disable(); | ||
142 | |||
143 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
144 | &irq_flags); | ||
145 | if (!event) | ||
146 | goto out; | ||
147 | entry = ring_buffer_event_data(event); | ||
148 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
149 | entry->ent.type = TRACE_BOOT_CALL; | ||
150 | entry->boot_call = *bt; | ||
151 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
152 | |||
153 | trace_wake_up(); | ||
154 | |||
155 | out: | ||
156 | preempt_enable(); | ||
157 | } | ||
158 | |||
159 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | ||
160 | { | ||
161 | struct ring_buffer_event *event; | ||
162 | struct trace_boot_ret *entry; | ||
163 | unsigned long irq_flags; | ||
164 | struct trace_array *tr = boot_trace; | ||
165 | |||
166 | if (!pre_initcalls_finished) | ||
167 | return; | ||
168 | |||
169 | sprint_symbol(bt->func, (unsigned long)fn); | ||
109 | preempt_disable(); | 170 | preempt_disable(); |
110 | data = tr->data[smp_processor_id()]; | ||
111 | 171 | ||
112 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 172 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), |
113 | &irq_flags); | 173 | &irq_flags); |
@@ -115,8 +175,8 @@ void trace_boot(struct boot_trace *it, initcall_t fn) | |||
115 | goto out; | 175 | goto out; |
116 | entry = ring_buffer_event_data(event); | 176 | entry = ring_buffer_event_data(event); |
117 | tracing_generic_entry_update(&entry->ent, 0, 0); | 177 | tracing_generic_entry_update(&entry->ent, 0, 0); |
118 | entry->ent.type = TRACE_BOOT; | 178 | entry->ent.type = TRACE_BOOT_RET; |
119 | entry->initcall = *it; | 179 | entry->boot_ret = *bt; |
120 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 180 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); |
121 | 181 | ||
122 | trace_wake_up(); | 182 | trace_wake_up(); |
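For context: the producer side of these two hooks lives in do_one_initcall() in init/main.c, which is not part of this hunk. As a rough sketch only (run_traced_initcall is an illustrative name and the duration scaling is simplified; the real tree differs in detail), the initcall is bracketed like this:

        static int run_traced_initcall(initcall_t fn)
        {
                struct boot_trace_call call;
                struct boot_trace_ret ret;
                ktime_t calltime, rettime;

                call.caller = task_pid_nr(current);
                trace_boot_call(&call, fn);     /* emits the "calling" event */
                enable_boot_trace();            /* start sched-switch recording */

                calltime = ktime_get();
                ret.result = fn();
                rettime = ktime_get();

                disable_boot_trace();
                ret.duration = (unsigned long long)
                        ktime_to_ns(ktime_sub(rettime, calltime));
                trace_boot_ret(&ret, fn);       /* emits the "returned" event */

                return ret.result;
        }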
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c new file mode 100644 index 000000000000..6c00feb3bac7 --- /dev/null +++ b/kernel/trace/trace_branch.c | |||
@@ -0,0 +1,342 @@ | |||
1 | /* | ||
2 | * unlikely profiler | ||
3 | * | ||
4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | ||
5 | */ | ||
6 | #include <linux/kallsyms.h> | ||
7 | #include <linux/seq_file.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <linux/irqflags.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/ftrace.h> | ||
14 | #include <linux/hash.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <asm/local.h> | ||
17 | #include "trace.h" | ||
18 | |||
19 | #ifdef CONFIG_BRANCH_TRACER | ||
20 | |||
21 | static int branch_tracing_enabled __read_mostly; | ||
22 | static DEFINE_MUTEX(branch_tracing_mutex); | ||
23 | static struct trace_array *branch_tracer; | ||
24 | |||
25 | static void | ||
26 | probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | ||
27 | { | ||
28 | struct trace_array *tr = branch_tracer; | ||
29 | struct ring_buffer_event *event; | ||
30 | struct trace_branch *entry; | ||
31 | unsigned long flags, irq_flags; | ||
32 | int cpu, pc; | ||
33 | const char *p; | ||
34 | |||
35 | /* | ||
36 | * I would love to save just the ftrace_likely_data pointer, but | ||
37 | * this code can also be used by modules. Ugly things can happen | ||
38 | * if the module is unloaded, and then we go and read the | ||
39 | * pointer. This is slower, but much safer. | ||
40 | */ | ||
41 | |||
42 | if (unlikely(!tr)) | ||
43 | return; | ||
44 | |||
45 | local_irq_save(flags); | ||
46 | cpu = raw_smp_processor_id(); | ||
47 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | ||
48 | goto out; | ||
49 | |||
50 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
51 | &irq_flags); | ||
52 | if (!event) | ||
53 | goto out; | ||
54 | |||
55 | pc = preempt_count(); | ||
56 | entry = ring_buffer_event_data(event); | ||
57 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
58 | entry->ent.type = TRACE_BRANCH; | ||
59 | |||
60 | /* Strip off the path, only save the file */ | ||
61 | p = f->file + strlen(f->file); | ||
62 | while (p >= f->file && *p != '/') | ||
63 | p--; | ||
64 | p++; | ||
65 | |||
66 | strncpy(entry->func, f->func, TRACE_FUNC_SIZE); | ||
67 | strncpy(entry->file, p, TRACE_FILE_SIZE); | ||
68 | entry->func[TRACE_FUNC_SIZE] = 0; | ||
69 | entry->file[TRACE_FILE_SIZE] = 0; | ||
70 | entry->line = f->line; | ||
71 | entry->correct = val == expect; | ||
72 | |||
73 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
74 | |||
75 | out: | ||
76 | atomic_dec(&tr->data[cpu]->disabled); | ||
77 | local_irq_restore(flags); | ||
78 | } | ||
79 | |||
80 | static inline | ||
81 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) | ||
82 | { | ||
83 | if (!branch_tracing_enabled) | ||
84 | return; | ||
85 | |||
86 | probe_likely_condition(f, val, expect); | ||
87 | } | ||
88 | |||
89 | int enable_branch_tracing(struct trace_array *tr) | ||
90 | { | ||
91 | int ret = 0; | ||
92 | |||
93 | mutex_lock(&branch_tracing_mutex); | ||
94 | branch_tracer = tr; | ||
95 | /* | ||
96 | * Must be seen before enabling. The reader only checks a | ||
97 | * condition, so we do not need a matching rmb(). | ||
98 | */ | ||
99 | smp_wmb(); | ||
100 | branch_tracing_enabled++; | ||
101 | mutex_unlock(&branch_tracing_mutex); | ||
102 | |||
103 | return ret; | ||
104 | } | ||
105 | |||
106 | void disable_branch_tracing(void) | ||
107 | { | ||
108 | mutex_lock(&branch_tracing_mutex); | ||
109 | |||
110 | if (!branch_tracing_enabled) | ||
111 | goto out_unlock; | ||
112 | |||
113 | branch_tracing_enabled--; | ||
114 | |||
115 | out_unlock: | ||
116 | mutex_unlock(&branch_tracing_mutex); | ||
117 | } | ||
118 | |||
119 | static void start_branch_trace(struct trace_array *tr) | ||
120 | { | ||
121 | enable_branch_tracing(tr); | ||
122 | } | ||
123 | |||
124 | static void stop_branch_trace(struct trace_array *tr) | ||
125 | { | ||
126 | disable_branch_tracing(); | ||
127 | } | ||
128 | |||
129 | static int branch_trace_init(struct trace_array *tr) | ||
130 | { | ||
131 | int cpu; | ||
132 | |||
133 | for_each_online_cpu(cpu) | ||
134 | tracing_reset(tr, cpu); | ||
135 | |||
136 | start_branch_trace(tr); | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static void branch_trace_reset(struct trace_array *tr) | ||
141 | { | ||
142 | stop_branch_trace(tr); | ||
143 | } | ||
144 | |||
145 | struct tracer branch_trace __read_mostly = | ||
146 | { | ||
147 | .name = "branch", | ||
148 | .init = branch_trace_init, | ||
149 | .reset = branch_trace_reset, | ||
150 | #ifdef CONFIG_FTRACE_SELFTEST | ||
151 | .selftest = trace_selftest_startup_branch, | ||
152 | #endif | ||
153 | }; | ||
154 | |||
155 | __init static int init_branch_trace(void) | ||
156 | { | ||
157 | return register_tracer(&branch_trace); | ||
158 | } | ||
159 | |||
160 | device_initcall(init_branch_trace); | ||
161 | #else | ||
162 | static inline | ||
163 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) | ||
164 | { | ||
165 | } | ||
166 | #endif /* CONFIG_BRANCH_TRACER */ | ||
167 | |||
168 | void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect) | ||
169 | { | ||
170 | /* | ||
171 | * I would love to have a trace point here instead, but the | ||
172 | * trace point code is so inundated with unlikely and likely | ||
173 | * conditions that the recursive nightmare that exists is too | ||
174 | * much to try to get working. At least for now. | ||
175 | */ | ||
176 | trace_likely_condition(f, val, expect); | ||
177 | |||
178 | /* FIXME: Make this atomic! */ | ||
179 | if (val == expect) | ||
180 | f->correct++; | ||
181 | else | ||
182 | f->incorrect++; | ||
183 | } | ||
184 | EXPORT_SYMBOL(ftrace_likely_update); | ||
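ftrace_likely_update() is fed by instrumented likely()/unlikely() macros in include/linux/compiler.h, which this patch does not show. Conceptually, as a sketch rather than the real macro, every annotation expands to something like:

        #define likely(x) ({                                            \
                static struct ftrace_branch_data                        \
                        __attribute__((section("_ftrace_annotated_branch"))) \
                        ______f = {                                     \
                                .func = __func__,                       \
                                .file = __FILE__,                       \
                                .line = __LINE__,                       \
                        };                                              \
                int ______r = !!(x);                                    \
                ftrace_likely_update(&______f, ______r, 1); /* expect true */ \
                ______r;                                                \
        })

Collecting every ftrace_branch_data into a dedicated section is what gives the __start/__stop_annotated_branch_profile bounds used further down.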
185 | |||
186 | struct ftrace_pointer { | ||
187 | void *start; | ||
188 | void *stop; | ||
189 | int hit; | ||
190 | }; | ||
191 | |||
192 | static void * | ||
193 | t_next(struct seq_file *m, void *v, loff_t *pos) | ||
194 | { | ||
195 | const struct ftrace_pointer *f = m->private; | ||
196 | struct ftrace_branch_data *p = v; | ||
197 | |||
198 | (*pos)++; | ||
199 | |||
200 | if (v == (void *)1) | ||
201 | return f->start; | ||
202 | |||
203 | ++p; | ||
204 | |||
205 | if ((void *)p >= (void *)f->stop) | ||
206 | return NULL; | ||
207 | |||
208 | return p; | ||
209 | } | ||
210 | |||
211 | static void *t_start(struct seq_file *m, loff_t *pos) | ||
212 | { | ||
213 | void *t = (void *)1; | ||
214 | loff_t l = 0; | ||
215 | |||
216 | for (; t && l < *pos; t = t_next(m, t, &l)) | ||
217 | ; | ||
218 | |||
219 | return t; | ||
220 | } | ||
221 | |||
222 | static void t_stop(struct seq_file *m, void *p) | ||
223 | { | ||
224 | } | ||
225 | |||
226 | static int t_show(struct seq_file *m, void *v) | ||
227 | { | ||
228 | const struct ftrace_pointer *fp = m->private; | ||
229 | struct ftrace_branch_data *p = v; | ||
230 | const char *f; | ||
231 | long percent; | ||
232 | |||
233 | if (v == (void *)1) { | ||
234 | if (fp->hit) | ||
235 | seq_printf(m, " miss hit %% "); | ||
236 | else | ||
237 | seq_printf(m, " correct incorrect %% "); | ||
238 | seq_printf(m, " Function " | ||
239 | " File Line\n" | ||
240 | " ------- --------- - " | ||
241 | " -------- " | ||
242 | " ---- ----\n"); | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | /* Only print the file, not the path */ | ||
247 | f = p->file + strlen(p->file); | ||
248 | while (f >= p->file && *f != '/') | ||
249 | f--; | ||
250 | f++; | ||
251 | |||
252 | /* | ||
253 | * The miss is overlaid on correct, and hit on incorrect. | ||
254 | */ | ||
255 | if (p->correct) { | ||
256 | percent = p->incorrect * 100; | ||
257 | percent /= p->correct + p->incorrect; | ||
258 | } else | ||
259 | percent = p->incorrect ? 100 : -1; | ||
260 | |||
261 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); | ||
262 | if (percent < 0) | ||
263 | seq_printf(m, " X "); | ||
264 | else | ||
265 | seq_printf(m, "%3ld ", percent); | ||
266 | seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); | ||
267 | return 0; | ||
268 | } | ||
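For example, an annotation whose prediction held 3 times and failed once prints 3, 1 and 25 (1 * 100 / 4 = 25); an annotation that never executed has correct == incorrect == 0 and gets an "X" in the percent column.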
269 | |||
270 | static struct seq_operations tracing_likely_seq_ops = { | ||
271 | .start = t_start, | ||
272 | .next = t_next, | ||
273 | .stop = t_stop, | ||
274 | .show = t_show, | ||
275 | }; | ||
276 | |||
277 | static int tracing_branch_open(struct inode *inode, struct file *file) | ||
278 | { | ||
279 | int ret; | ||
280 | |||
281 | ret = seq_open(file, &tracing_likely_seq_ops); | ||
282 | if (!ret) { | ||
283 | struct seq_file *m = file->private_data; | ||
284 | m->private = (void *)inode->i_private; | ||
285 | } | ||
286 | |||
287 | return ret; | ||
288 | } | ||
289 | |||
290 | static const struct file_operations tracing_branch_fops = { | ||
291 | .open = tracing_branch_open, | ||
292 | .read = seq_read, | ||
293 | .llseek = seq_lseek, | ||
294 | }; | ||
295 | |||
296 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | ||
297 | extern unsigned long __start_branch_profile[]; | ||
298 | extern unsigned long __stop_branch_profile[]; | ||
299 | |||
300 | static const struct ftrace_pointer ftrace_branch_pos = { | ||
301 | .start = __start_branch_profile, | ||
302 | .stop = __stop_branch_profile, | ||
303 | .hit = 1, | ||
304 | }; | ||
305 | |||
306 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ | ||
307 | |||
308 | extern unsigned long __start_annotated_branch_profile[]; | ||
309 | extern unsigned long __stop_annotated_branch_profile[]; | ||
310 | |||
311 | static const struct ftrace_pointer ftrace_annotated_branch_pos = { | ||
312 | .start = __start_annotated_branch_profile, | ||
313 | .stop = __stop_annotated_branch_profile, | ||
314 | }; | ||
315 | |||
316 | static __init int ftrace_branch_init(void) | ||
317 | { | ||
318 | struct dentry *d_tracer; | ||
319 | struct dentry *entry; | ||
320 | |||
321 | d_tracer = tracing_init_dentry(); | ||
322 | |||
323 | entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer, | ||
324 | (void *)&ftrace_annotated_branch_pos, | ||
325 | &tracing_branch_fops); | ||
326 | if (!entry) | ||
327 | pr_warning("Could not create debugfs " | ||
328 | "'profile_annotatet_branch' entry\n"); | ||
329 | |||
330 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | ||
331 | entry = debugfs_create_file("profile_branch", 0444, d_tracer, | ||
332 | (void *)&ftrace_branch_pos, | ||
333 | &tracing_branch_fops); | ||
334 | if (!entry) | ||
335 | pr_warning("Could not create debugfs" | ||
336 | " 'profile_branch' entry\n"); | ||
337 | #endif | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | device_initcall(ftrace_branch_init); | ||
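Once this initcall has run, the statistics can be read back from the files profile_annotated_branch and, with CONFIG_PROFILE_ALL_BRANCHES, profile_branch in the tracing debugfs directory (conventionally under /sys/kernel/debug/tracing).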
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 0f85a64003d3..9236d7e25a16 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -16,20 +16,10 @@ | |||
16 | 16 | ||
17 | #include "trace.h" | 17 | #include "trace.h" |
18 | 18 | ||
19 | static void function_reset(struct trace_array *tr) | ||
20 | { | ||
21 | int cpu; | ||
22 | |||
23 | tr->time_start = ftrace_now(tr->cpu); | ||
24 | |||
25 | for_each_online_cpu(cpu) | ||
26 | tracing_reset(tr, cpu); | ||
27 | } | ||
28 | |||
29 | static void start_function_trace(struct trace_array *tr) | 19 | static void start_function_trace(struct trace_array *tr) |
30 | { | 20 | { |
31 | tr->cpu = get_cpu(); | 21 | tr->cpu = get_cpu(); |
32 | function_reset(tr); | 22 | tracing_reset_online_cpus(tr); |
33 | put_cpu(); | 23 | put_cpu(); |
34 | 24 | ||
35 | tracing_start_cmdline_record(); | 25 | tracing_start_cmdline_record(); |
@@ -42,24 +32,20 @@ static void stop_function_trace(struct trace_array *tr) | |||
42 | tracing_stop_cmdline_record(); | 32 | tracing_stop_cmdline_record(); |
43 | } | 33 | } |
44 | 34 | ||
45 | static void function_trace_init(struct trace_array *tr) | 35 | static int function_trace_init(struct trace_array *tr) |
46 | { | 36 | { |
47 | if (tr->ctrl) | 37 | start_function_trace(tr); |
48 | start_function_trace(tr); | 38 | return 0; |
49 | } | 39 | } |
50 | 40 | ||
51 | static void function_trace_reset(struct trace_array *tr) | 41 | static void function_trace_reset(struct trace_array *tr) |
52 | { | 42 | { |
53 | if (tr->ctrl) | 43 | stop_function_trace(tr); |
54 | stop_function_trace(tr); | ||
55 | } | 44 | } |
56 | 45 | ||
57 | static void function_trace_ctrl_update(struct trace_array *tr) | 46 | static void function_trace_start(struct trace_array *tr) |
58 | { | 47 | { |
59 | if (tr->ctrl) | 48 | tracing_reset_online_cpus(tr); |
60 | start_function_trace(tr); | ||
61 | else | ||
62 | stop_function_trace(tr); | ||
63 | } | 49 | } |
64 | 50 | ||
65 | static struct tracer function_trace __read_mostly = | 51 | static struct tracer function_trace __read_mostly = |
@@ -67,7 +53,7 @@ static struct tracer function_trace __read_mostly = | |||
67 | .name = "function", | 53 | .name = "function", |
68 | .init = function_trace_init, | 54 | .init = function_trace_init, |
69 | .reset = function_trace_reset, | 55 | .reset = function_trace_reset, |
70 | .ctrl_update = function_trace_ctrl_update, | 56 | .start = function_trace_start, |
71 | #ifdef CONFIG_FTRACE_SELFTEST | 57 | #ifdef CONFIG_FTRACE_SELFTEST |
72 | .selftest = trace_selftest_startup_function, | 58 | .selftest = trace_selftest_startup_function, |
73 | #endif | 59 | #endif |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c new file mode 100644 index 000000000000..4bf39fcae97a --- /dev/null +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -0,0 +1,669 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Function graph tracer. | ||
4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * Mostly borrowed from function tracer which | ||
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | ||
7 | * | ||
8 | */ | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/uaccess.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/fs.h> | ||
13 | |||
14 | #include "trace.h" | ||
15 | |||
16 | #define TRACE_GRAPH_INDENT 2 | ||
17 | |||
18 | /* Flag options */ | ||
19 | #define TRACE_GRAPH_PRINT_OVERRUN 0x1 | ||
20 | #define TRACE_GRAPH_PRINT_CPU 0x2 | ||
21 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | ||
22 | #define TRACE_GRAPH_PRINT_PROC 0x8 | ||
23 | |||
24 | static struct tracer_opt trace_opts[] = { | ||
25 | /* Display overruns? */ | ||
26 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | ||
27 | /* Display CPU? */ | ||
28 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | ||
29 | /* Display Overhead? */ | ||
30 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | ||
31 | /* Display proc name/pid */ | ||
32 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | ||
33 | { } /* Empty entry */ | ||
34 | }; | ||
35 | |||
36 | static struct tracer_flags tracer_flags = { | ||
37 | /* Don't display overruns and proc by default */ | ||
38 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, | ||
39 | .opts = trace_opts | ||
40 | }; | ||
41 | |||
42 | /* pid on the last trace processed */ | ||
43 | static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; | ||
44 | |||
45 | static int graph_trace_init(struct trace_array *tr) | ||
46 | { | ||
47 | int cpu, ret; | ||
48 | |||
49 | for_each_online_cpu(cpu) | ||
50 | tracing_reset(tr, cpu); | ||
51 | |||
52 | ret = register_ftrace_graph(&trace_graph_return, | ||
53 | &trace_graph_entry); | ||
54 | if (ret) | ||
55 | return ret; | ||
56 | tracing_start_cmdline_record(); | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static void graph_trace_reset(struct trace_array *tr) | ||
62 | { | ||
63 | tracing_stop_cmdline_record(); | ||
64 | unregister_ftrace_graph(); | ||
65 | } | ||
66 | |||
67 | static inline int log10_cpu(int nb) | ||
68 | { | ||
69 | if (nb / 100) | ||
70 | return 3; | ||
71 | if (nb / 10) | ||
72 | return 2; | ||
73 | return 1; | ||
74 | } | ||
75 | |||
76 | static enum print_line_t | ||
77 | print_graph_cpu(struct trace_seq *s, int cpu) | ||
78 | { | ||
79 | int i; | ||
80 | int ret; | ||
81 | int log10_this = log10_cpu(cpu); | ||
82 | int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map)); | ||
83 | |||
84 | |||
85 | /* | ||
86 | * Start with a space character - to make it stand out | ||
87 | * to the right a bit when trace output is pasted into | ||
88 | * email: | ||
89 | */ | ||
90 | ret = trace_seq_printf(s, " "); | ||
91 | |||
92 | /* | ||
93 | * Tricky - we space the CPU field according to the max | ||
94 | * number of online CPUs. On a 2-cpu system it would take | ||
95 | * a maximum of 1 digit - on a 128 cpu system it would | ||
96 | * take up to 3 digits: | ||
97 | */ | ||
98 | for (i = 0; i < log10_all - log10_this; i++) { | ||
99 | ret = trace_seq_printf(s, " "); | ||
100 | if (!ret) | ||
101 | return TRACE_TYPE_PARTIAL_LINE; | ||
102 | } | ||
103 | ret = trace_seq_printf(s, "%d) ", cpu); | ||
104 | if (!ret) | ||
105 | return TRACE_TYPE_PARTIAL_LINE; | ||
106 | |||
107 | return TRACE_TYPE_HANDLED; | ||
108 | } | ||
109 | |||
110 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | ||
111 | |||
112 | static enum print_line_t | ||
113 | print_graph_proc(struct trace_seq *s, pid_t pid) | ||
114 | { | ||
115 | int i; | ||
116 | int ret; | ||
117 | int len; | ||
118 | char comm[8]; | ||
119 | int spaces = 0; | ||
120 | /* sign + log10(MAX_INT) + '\0' */ | ||
121 | char pid_str[11]; | ||
122 | |||
123 | strncpy(comm, trace_find_cmdline(pid), 7); | ||
124 | comm[7] = '\0'; | ||
125 | sprintf(pid_str, "%d", pid); | ||
126 | |||
127 | /* 1 stands for the "-" character */ | ||
128 | len = strlen(comm) + strlen(pid_str) + 1; | ||
129 | |||
130 | if (len < TRACE_GRAPH_PROCINFO_LENGTH) | ||
131 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | ||
132 | |||
133 | /* First spaces to align center */ | ||
134 | for (i = 0; i < spaces / 2; i++) { | ||
135 | ret = trace_seq_printf(s, " "); | ||
136 | if (!ret) | ||
137 | return TRACE_TYPE_PARTIAL_LINE; | ||
138 | } | ||
139 | |||
140 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | ||
141 | if (!ret) | ||
142 | return TRACE_TYPE_PARTIAL_LINE; | ||
143 | |||
144 | /* Last spaces to align center */ | ||
145 | for (i = 0; i < spaces - (spaces / 2); i++) { | ||
146 | ret = trace_seq_printf(s, " "); | ||
147 | if (!ret) | ||
148 | return TRACE_TYPE_PARTIAL_LINE; | ||
149 | } | ||
150 | return TRACE_TYPE_HANDLED; | ||
151 | } | ||
152 | |||
153 | |||
154 | /* If the pid changed since the last trace, output this event */ | ||
155 | static enum print_line_t | ||
156 | verif_pid(struct trace_seq *s, pid_t pid, int cpu) | ||
157 | { | ||
158 | pid_t prev_pid; | ||
159 | int ret; | ||
160 | |||
161 | if (last_pid[cpu] != -1 && last_pid[cpu] == pid) | ||
162 | return TRACE_TYPE_HANDLED; | ||
163 | |||
164 | prev_pid = last_pid[cpu]; | ||
165 | last_pid[cpu] = pid; | ||
166 | |||
167 | /* | ||
168 | * Context-switch trace line: | ||
169 | |||
170 | ------------------------------------------ | ||
171 | | 1) migration/0--1 => sshd-1755 | ||
172 | ------------------------------------------ | ||
173 | |||
174 | */ | ||
175 | ret = trace_seq_printf(s, | ||
176 | " ------------------------------------------\n"); | ||
177 | if (!ret) | ||
178 | TRACE_TYPE_PARTIAL_LINE; | ||
179 | |||
180 | ret = print_graph_cpu(s, cpu); | ||
181 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
182 | return TRACE_TYPE_PARTIAL_LINE; | ||
183 | |||
184 | ret = print_graph_proc(s, prev_pid); | ||
185 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
186 | return TRACE_TYPE_PARTIAL_LINE; | ||
187 | |||
188 | ret = trace_seq_printf(s, " => "); | ||
189 | if (!ret) | ||
190 | return TRACE_TYPE_PARTIAL_LINE; | ||
191 | |||
192 | ret = print_graph_proc(s, pid); | ||
193 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
194 | return TRACE_TYPE_PARTIAL_LINE; | ||
195 | |||
196 | ret = trace_seq_printf(s, | ||
197 | "\n ------------------------------------------\n\n"); | ||
198 | if (!ret) | ||
199 | return TRACE_TYPE_PARTIAL_LINE; | ||
200 | |||
201 | return TRACE_TYPE_HANDLED; | ||
202 | } | ||
203 | |||
204 | static bool | ||
205 | trace_branch_is_leaf(struct trace_iterator *iter, | ||
206 | struct ftrace_graph_ent_entry *curr) | ||
207 | { | ||
208 | struct ring_buffer_iter *ring_iter; | ||
209 | struct ring_buffer_event *event; | ||
210 | struct ftrace_graph_ret_entry *next; | ||
211 | |||
212 | ring_iter = iter->buffer_iter[iter->cpu]; | ||
213 | |||
214 | if (!ring_iter) | ||
215 | return false; | ||
216 | |||
217 | event = ring_buffer_iter_peek(ring_iter, NULL); | ||
218 | |||
219 | if (!event) | ||
220 | return false; | ||
221 | |||
222 | next = ring_buffer_event_data(event); | ||
223 | |||
224 | if (next->ent.type != TRACE_GRAPH_RET) | ||
225 | return false; | ||
226 | |||
227 | if (curr->ent.pid != next->ent.pid || | ||
228 | curr->graph_ent.func != next->ret.func) | ||
229 | return false; | ||
230 | |||
231 | return true; | ||
232 | } | ||
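In other words: a call event immediately followed in the buffer by its own return, with matching pid and function, had no traced children. print_graph_entry() below folds such a pair into a single "func();" line via print_graph_entry_leaf(), while anything else opens a "func() {" block through print_graph_entry_nested().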
233 | |||
234 | static enum print_line_t | ||
235 | print_graph_irq(struct trace_seq *s, unsigned long addr, | ||
236 | enum trace_type type, int cpu, pid_t pid) | ||
237 | { | ||
238 | int ret; | ||
239 | |||
240 | if (addr < (unsigned long)__irqentry_text_start || | ||
241 | addr >= (unsigned long)__irqentry_text_end) | ||
242 | return TRACE_TYPE_UNHANDLED; | ||
243 | |||
244 | if (type == TRACE_GRAPH_ENT) { | ||
245 | ret = trace_seq_printf(s, "==========> | "); | ||
246 | } else { | ||
247 | /* Cpu */ | ||
248 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | ||
249 | ret = print_graph_cpu(s, cpu); | ||
250 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
251 | return TRACE_TYPE_PARTIAL_LINE; | ||
252 | } | ||
253 | /* Proc */ | ||
254 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
255 | ret = print_graph_proc(s, pid); | ||
256 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
257 | return TRACE_TYPE_PARTIAL_LINE; | ||
258 | |||
259 | ret = trace_seq_printf(s, " | "); | ||
260 | if (!ret) | ||
261 | return TRACE_TYPE_PARTIAL_LINE; | ||
262 | } | ||
263 | |||
264 | /* No overhead */ | ||
265 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
266 | ret = trace_seq_printf(s, " "); | ||
267 | if (!ret) | ||
268 | return TRACE_TYPE_PARTIAL_LINE; | ||
269 | } | ||
270 | |||
271 | ret = trace_seq_printf(s, "<========== |\n"); | ||
272 | } | ||
273 | if (!ret) | ||
274 | return TRACE_TYPE_PARTIAL_LINE; | ||
275 | return TRACE_TYPE_HANDLED; | ||
276 | } | ||
277 | |||
278 | static enum print_line_t | ||
279 | print_graph_duration(unsigned long long duration, struct trace_seq *s) | ||
280 | { | ||
281 | unsigned long nsecs_rem = do_div(duration, 1000); | ||
282 | /* log10(ULONG_MAX) + '\0' */ | ||
283 | char msecs_str[21]; | ||
284 | char nsecs_str[5]; | ||
285 | int ret, len; | ||
286 | int i; | ||
287 | |||
288 | sprintf(msecs_str, "%lu", (unsigned long) duration); | ||
289 | |||
290 | /* Print msecs */ | ||
291 | ret = trace_seq_printf(s, msecs_str); | ||
292 | if (!ret) | ||
293 | return TRACE_TYPE_PARTIAL_LINE; | ||
294 | |||
295 | len = strlen(msecs_str); | ||
296 | |||
297 | /* Print nsecs (we don't want to exceed 7 digits overall) */ | ||
298 | if (len < 7) { | ||
299 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); | ||
300 | ret = trace_seq_printf(s, ".%s", nsecs_str); | ||
301 | if (!ret) | ||
302 | return TRACE_TYPE_PARTIAL_LINE; | ||
303 | len += strlen(nsecs_str); | ||
304 | } | ||
305 | |||
306 | ret = trace_seq_printf(s, " us "); | ||
307 | if (!ret) | ||
308 | return TRACE_TYPE_PARTIAL_LINE; | ||
309 | |||
310 | /* Print remaining spaces to fit the row's width */ | ||
311 | for (i = len; i < 7; i++) { | ||
312 | ret = trace_seq_printf(s, " "); | ||
313 | if (!ret) | ||
314 | return TRACE_TYPE_PARTIAL_LINE; | ||
315 | } | ||
316 | |||
317 | ret = trace_seq_printf(s, "| "); | ||
318 | if (!ret) | ||
319 | return TRACE_TYPE_PARTIAL_LINE; | ||
320 | return TRACE_TYPE_HANDLED; | ||
321 | |||
322 | } | ||
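For example, a duration of 3320 ns becomes 3 after do_div(duration, 1000) with nsecs_rem = 320, so the column reads "3.320 us" plus padding. Note that the 8 - len bound passed to snprintf() caps the output at 7 digits in total, so very long durations silently lose nanosecond digits.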
323 | |||
324 | /* Signal an execution-time overhead to the output */ | ||
325 | static int | ||
326 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | ||
327 | { | ||
328 | /* Duration exceeded 100 msecs */ | ||
329 | if (duration > 100000ULL) | ||
330 | return trace_seq_printf(s, "! "); | ||
331 | |||
332 | /* Duration exceeded 10 msecs */ | ||
333 | if (duration > 10000ULL) | ||
334 | return trace_seq_printf(s, "+ "); | ||
335 | |||
336 | return trace_seq_printf(s, " "); | ||
337 | } | ||
338 | |||
339 | /* Case of a leaf function on its call entry */ | ||
340 | static enum print_line_t | ||
341 | print_graph_entry_leaf(struct trace_iterator *iter, | ||
342 | struct ftrace_graph_ent_entry *entry, struct trace_seq *s) | ||
343 | { | ||
344 | struct ftrace_graph_ret_entry *ret_entry; | ||
345 | struct ftrace_graph_ret *graph_ret; | ||
346 | struct ring_buffer_event *event; | ||
347 | struct ftrace_graph_ent *call; | ||
348 | unsigned long long duration; | ||
349 | int ret; | ||
350 | int i; | ||
351 | |||
352 | event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | ||
353 | ret_entry = ring_buffer_event_data(event); | ||
354 | graph_ret = &ret_entry->ret; | ||
355 | call = &entry->graph_ent; | ||
356 | duration = graph_ret->rettime - graph_ret->calltime; | ||
357 | |||
358 | /* Overhead */ | ||
359 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
360 | ret = print_graph_overhead(duration, s); | ||
361 | if (!ret) | ||
362 | return TRACE_TYPE_PARTIAL_LINE; | ||
363 | } | ||
364 | |||
365 | /* Duration */ | ||
366 | ret = print_graph_duration(duration, s); | ||
367 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
368 | return TRACE_TYPE_PARTIAL_LINE; | ||
369 | |||
370 | /* Function */ | ||
371 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | ||
372 | ret = trace_seq_printf(s, " "); | ||
373 | if (!ret) | ||
374 | return TRACE_TYPE_PARTIAL_LINE; | ||
375 | } | ||
376 | |||
377 | ret = seq_print_ip_sym(s, call->func, 0); | ||
378 | if (!ret) | ||
379 | return TRACE_TYPE_PARTIAL_LINE; | ||
380 | |||
381 | ret = trace_seq_printf(s, "();\n"); | ||
382 | if (!ret) | ||
383 | return TRACE_TYPE_PARTIAL_LINE; | ||
384 | |||
385 | return TRACE_TYPE_HANDLED; | ||
386 | } | ||
387 | |||
388 | static enum print_line_t | ||
389 | print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, | ||
390 | struct trace_seq *s, pid_t pid, int cpu) | ||
391 | { | ||
392 | int i; | ||
393 | int ret; | ||
394 | struct ftrace_graph_ent *call = &entry->graph_ent; | ||
395 | |||
396 | /* No overhead */ | ||
397 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
398 | ret = trace_seq_printf(s, " "); | ||
399 | if (!ret) | ||
400 | return TRACE_TYPE_PARTIAL_LINE; | ||
401 | } | ||
402 | |||
403 | /* Interrupt */ | ||
404 | ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid); | ||
405 | if (ret == TRACE_TYPE_UNHANDLED) { | ||
406 | /* No time */ | ||
407 | ret = trace_seq_printf(s, " | "); | ||
408 | if (!ret) | ||
409 | return TRACE_TYPE_PARTIAL_LINE; | ||
410 | } else { | ||
411 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
412 | return TRACE_TYPE_PARTIAL_LINE; | ||
413 | } | ||
414 | |||
415 | |||
416 | /* Function */ | ||
417 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | ||
418 | ret = trace_seq_printf(s, " "); | ||
419 | if (!ret) | ||
420 | return TRACE_TYPE_PARTIAL_LINE; | ||
421 | } | ||
422 | |||
423 | ret = seq_print_ip_sym(s, call->func, 0); | ||
424 | if (!ret) | ||
425 | return TRACE_TYPE_PARTIAL_LINE; | ||
426 | |||
427 | ret = trace_seq_printf(s, "() {\n"); | ||
428 | if (!ret) | ||
429 | return TRACE_TYPE_PARTIAL_LINE; | ||
430 | |||
431 | return TRACE_TYPE_HANDLED; | ||
432 | } | ||
433 | |||
434 | static enum print_line_t | ||
435 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | ||
436 | struct trace_iterator *iter, int cpu) | ||
437 | { | ||
438 | int ret; | ||
439 | struct trace_entry *ent = iter->ent; | ||
440 | |||
441 | /* Pid */ | ||
442 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | ||
443 | return TRACE_TYPE_PARTIAL_LINE; | ||
444 | |||
445 | /* Cpu */ | ||
446 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | ||
447 | ret = print_graph_cpu(s, cpu); | ||
448 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
449 | return TRACE_TYPE_PARTIAL_LINE; | ||
450 | } | ||
451 | |||
452 | /* Proc */ | ||
453 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
454 | ret = print_graph_proc(s, ent->pid); | ||
455 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
456 | return TRACE_TYPE_PARTIAL_LINE; | ||
457 | |||
458 | ret = trace_seq_printf(s, " | "); | ||
459 | if (!ret) | ||
460 | return TRACE_TYPE_PARTIAL_LINE; | ||
461 | } | ||
462 | |||
463 | if (trace_branch_is_leaf(iter, field)) | ||
464 | return print_graph_entry_leaf(iter, field, s); | ||
465 | else | ||
466 | return print_graph_entry_nested(field, s, iter->ent->pid, cpu); | ||
467 | |||
468 | } | ||
469 | |||
470 | static enum print_line_t | ||
471 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | ||
472 | struct trace_entry *ent, int cpu) | ||
473 | { | ||
474 | int i; | ||
475 | int ret; | ||
476 | unsigned long long duration = trace->rettime - trace->calltime; | ||
477 | |||
478 | /* Pid */ | ||
479 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | ||
480 | return TRACE_TYPE_PARTIAL_LINE; | ||
481 | |||
482 | /* Cpu */ | ||
483 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | ||
484 | ret = print_graph_cpu(s, cpu); | ||
485 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
486 | return TRACE_TYPE_PARTIAL_LINE; | ||
487 | } | ||
488 | |||
489 | /* Proc */ | ||
490 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
491 | ret = print_graph_proc(s, ent->pid); | ||
492 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
493 | return TRACE_TYPE_PARTIAL_LINE; | ||
494 | |||
495 | ret = trace_seq_printf(s, " | "); | ||
496 | if (!ret) | ||
497 | return TRACE_TYPE_PARTIAL_LINE; | ||
498 | } | ||
499 | |||
500 | /* Overhead */ | ||
501 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
502 | ret = print_graph_overhead(duration, s); | ||
503 | if (!ret) | ||
504 | return TRACE_TYPE_PARTIAL_LINE; | ||
505 | } | ||
506 | |||
507 | /* Duration */ | ||
508 | ret = print_graph_duration(duration, s); | ||
509 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
510 | return TRACE_TYPE_PARTIAL_LINE; | ||
511 | |||
512 | /* Closing brace */ | ||
513 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | ||
514 | ret = trace_seq_printf(s, " "); | ||
515 | if (!ret) | ||
516 | return TRACE_TYPE_PARTIAL_LINE; | ||
517 | } | ||
518 | |||
519 | ret = trace_seq_printf(s, "}\n"); | ||
520 | if (!ret) | ||
521 | return TRACE_TYPE_PARTIAL_LINE; | ||
522 | |||
523 | /* Overrun */ | ||
524 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERRUN) { | ||
525 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | ||
526 | trace->overrun); | ||
527 | if (!ret) | ||
528 | return TRACE_TYPE_PARTIAL_LINE; | ||
529 | } | ||
530 | |||
531 | ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid); | ||
532 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
533 | return TRACE_TYPE_PARTIAL_LINE; | ||
534 | |||
535 | return TRACE_TYPE_HANDLED; | ||
536 | } | ||
537 | |||
538 | static enum print_line_t | ||
539 | print_graph_comment(struct print_entry *trace, struct trace_seq *s, | ||
540 | struct trace_entry *ent, struct trace_iterator *iter) | ||
541 | { | ||
542 | int i; | ||
543 | int ret; | ||
544 | |||
545 | /* Pid */ | ||
546 | if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE) | ||
547 | return TRACE_TYPE_PARTIAL_LINE; | ||
548 | |||
549 | /* Cpu */ | ||
550 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | ||
551 | ret = print_graph_cpu(s, iter->cpu); | ||
552 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
553 | return TRACE_TYPE_PARTIAL_LINE; | ||
554 | } | ||
555 | |||
556 | /* Proc */ | ||
557 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
558 | ret = print_graph_proc(s, ent->pid); | ||
559 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
560 | return TRACE_TYPE_PARTIAL_LINE; | ||
561 | |||
562 | ret = trace_seq_printf(s, " | "); | ||
563 | if (!ret) | ||
564 | return TRACE_TYPE_PARTIAL_LINE; | ||
565 | } | ||
566 | |||
567 | /* No overhead */ | ||
568 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
569 | ret = trace_seq_printf(s, " "); | ||
570 | if (!ret) | ||
571 | return TRACE_TYPE_PARTIAL_LINE; | ||
572 | } | ||
573 | |||
574 | /* No time */ | ||
575 | ret = trace_seq_printf(s, " | "); | ||
576 | if (!ret) | ||
577 | return TRACE_TYPE_PARTIAL_LINE; | ||
578 | |||
579 | /* Indentation */ | ||
580 | if (trace->depth > 0) | ||
581 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { | ||
582 | ret = trace_seq_printf(s, " "); | ||
583 | if (!ret) | ||
584 | return TRACE_TYPE_PARTIAL_LINE; | ||
585 | } | ||
586 | |||
587 | /* The comment */ | ||
588 | ret = trace_seq_printf(s, "/* %s", trace->buf); | ||
589 | if (!ret) | ||
590 | return TRACE_TYPE_PARTIAL_LINE; | ||
591 | |||
592 | if (ent->flags & TRACE_FLAG_CONT) | ||
593 | trace_seq_print_cont(s, iter); | ||
594 | |||
595 | ret = trace_seq_printf(s, " */\n"); | ||
596 | if (!ret) | ||
597 | return TRACE_TYPE_PARTIAL_LINE; | ||
598 | |||
599 | return TRACE_TYPE_HANDLED; | ||
600 | } | ||
601 | |||
602 | |||
603 | enum print_line_t | ||
604 | print_graph_function(struct trace_iterator *iter) | ||
605 | { | ||
606 | struct trace_seq *s = &iter->seq; | ||
607 | struct trace_entry *entry = iter->ent; | ||
608 | |||
609 | switch (entry->type) { | ||
610 | case TRACE_GRAPH_ENT: { | ||
611 | struct ftrace_graph_ent_entry *field; | ||
612 | trace_assign_type(field, entry); | ||
613 | return print_graph_entry(field, s, iter, | ||
614 | iter->cpu); | ||
615 | } | ||
616 | case TRACE_GRAPH_RET: { | ||
617 | struct ftrace_graph_ret_entry *field; | ||
618 | trace_assign_type(field, entry); | ||
619 | return print_graph_return(&field->ret, s, entry, iter->cpu); | ||
620 | } | ||
621 | case TRACE_PRINT: { | ||
622 | struct print_entry *field; | ||
623 | trace_assign_type(field, entry); | ||
624 | return print_graph_comment(field, s, entry, iter); | ||
625 | } | ||
626 | default: | ||
627 | return TRACE_TYPE_UNHANDLED; | ||
628 | } | ||
629 | } | ||
630 | |||
631 | static void print_graph_headers(struct seq_file *s) | ||
632 | { | ||
633 | /* 1st line */ | ||
634 | seq_printf(s, "# "); | ||
635 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | ||
636 | seq_printf(s, "CPU "); | ||
637 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | ||
638 | seq_printf(s, "TASK/PID "); | ||
639 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) | ||
640 | seq_printf(s, "OVERHEAD/"); | ||
641 | seq_printf(s, "DURATION FUNCTION CALLS\n"); | ||
642 | |||
643 | /* 2nd line */ | ||
644 | seq_printf(s, "# "); | ||
645 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | ||
646 | seq_printf(s, "| "); | ||
647 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | ||
648 | seq_printf(s, "| | "); | ||
649 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
650 | seq_printf(s, "| "); | ||
651 | seq_printf(s, "| | | | |\n"); | ||
652 | } else | ||
653 | seq_printf(s, " | | | | |\n"); | ||
654 | } | ||
655 | static struct tracer graph_trace __read_mostly = { | ||
656 | .name = "function_graph", | ||
657 | .init = graph_trace_init, | ||
658 | .reset = graph_trace_reset, | ||
659 | .print_line = print_graph_function, | ||
660 | .print_header = print_graph_headers, | ||
661 | .flags = &tracer_flags, | ||
662 | }; | ||
663 | |||
664 | static __init int init_graph_trace(void) | ||
665 | { | ||
666 | return register_tracer(&graph_trace); | ||
667 | } | ||
668 | |||
669 | device_initcall(init_graph_trace); | ||
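The print_graph_* helpers above all follow one convention: every trace_seq_printf() is checked, and a failed write (the seq buffer is full) propagates up as TRACE_TYPE_PARTIAL_LINE so the core can flush the buffer and re-render the whole line. A minimal sketch of that convention, using a hypothetical helper name:

/* Sketch only: emit a string, or report a partial line so the caller bails.
 * emit_or_bail() is a hypothetical name, not part of the file above. */
static enum print_line_t emit_or_bail(struct trace_seq *s, const char *str)
{
	if (!trace_seq_printf(s, "%s", str))
		return TRACE_TYPE_PARTIAL_LINE;	/* seq buffer full */
	return TRACE_TYPE_HANDLED;
}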
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c new file mode 100644 index 000000000000..b6a3e20a49a9 --- /dev/null +++ b/kernel/trace/trace_hw_branches.c | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * h/w branch tracer for x86 based on bts | ||
3 | * | ||
4 | * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/fs.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/kallsyms.h> | ||
13 | |||
14 | #include <asm/ds.h> | ||
15 | |||
16 | #include "trace.h" | ||
17 | |||
18 | |||
19 | #define SIZEOF_BTS (1 << 13) | ||
20 | |||
21 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); | ||
22 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); | ||
23 | |||
24 | #define this_tracer per_cpu(tracer, smp_processor_id()) | ||
25 | #define this_buffer per_cpu(buffer, smp_processor_id()) | ||
26 | |||
27 | |||
28 | static void bts_trace_start_cpu(void *arg) | ||
29 | { | ||
30 | if (this_tracer) | ||
31 | ds_release_bts(this_tracer); | ||
32 | |||
33 | this_tracer = | ||
34 | ds_request_bts(/* task = */ NULL, this_buffer, SIZEOF_BTS, | ||
35 | /* ovfl = */ NULL, /* th = */ (size_t)-1, | ||
36 | BTS_KERNEL); | ||
37 | if (IS_ERR(this_tracer)) { | ||
38 | this_tracer = NULL; | ||
39 | return; | ||
40 | } | ||
41 | } | ||
42 | |||
43 | static void bts_trace_start(struct trace_array *tr) | ||
44 | { | ||
45 | int cpu; | ||
46 | |||
47 | tracing_reset_online_cpus(tr); | ||
48 | |||
49 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
50 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | ||
51 | } | ||
52 | |||
53 | static void bts_trace_stop_cpu(void *arg) | ||
54 | { | ||
55 | if (this_tracer) { | ||
56 | ds_release_bts(this_tracer); | ||
57 | this_tracer = NULL; | ||
58 | } | ||
59 | } | ||
60 | |||
61 | static void bts_trace_stop(struct trace_array *tr) | ||
62 | { | ||
63 | int cpu; | ||
64 | |||
65 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
66 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); | ||
67 | } | ||
68 | |||
69 | static int bts_trace_init(struct trace_array *tr) | ||
70 | { | ||
71 | tracing_reset_online_cpus(tr); | ||
72 | bts_trace_start(tr); | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void bts_trace_print_header(struct seq_file *m) | ||
78 | { | ||
79 | seq_puts(m, | ||
80 | "# CPU# FROM TO FUNCTION\n"); | ||
81 | seq_puts(m, | ||
82 | "# | | | |\n"); | ||
83 | } | ||
84 | |||
85 | static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | ||
86 | { | ||
87 | struct trace_entry *entry = iter->ent; | ||
88 | struct trace_seq *seq = &iter->seq; | ||
89 | struct hw_branch_entry *it; | ||
90 | |||
91 | trace_assign_type(it, entry); | ||
92 | |||
93 | if (entry->type == TRACE_HW_BRANCHES) { | ||
94 | if (trace_seq_printf(seq, "%4d ", entry->cpu) && | ||
95 | trace_seq_printf(seq, "0x%016llx -> 0x%016llx ", | ||
96 | it->from, it->to) && | ||
97 | (!it->from || | ||
98 | seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) && | ||
99 | trace_seq_printf(seq, "\n")) | ||
100 | return TRACE_TYPE_HANDLED; | ||
101 | return TRACE_TYPE_PARTIAL_LINE; | ||
102 | } | ||
103 | return TRACE_TYPE_UNHANDLED; | ||
104 | } | ||
105 | |||
106 | void trace_hw_branch(struct trace_array *tr, u64 from, u64 to) | ||
107 | { | ||
108 | struct ring_buffer_event *event; | ||
109 | struct hw_branch_entry *entry; | ||
110 | unsigned long irq; | ||
111 | |||
112 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); | ||
113 | if (!event) | ||
114 | return; | ||
115 | entry = ring_buffer_event_data(event); | ||
116 | tracing_generic_entry_update(&entry->ent, 0, from); | ||
117 | entry->ent.type = TRACE_HW_BRANCHES; | ||
118 | entry->ent.cpu = smp_processor_id(); | ||
119 | entry->from = from; | ||
120 | entry->to = to; | ||
121 | ring_buffer_unlock_commit(tr->buffer, event, irq); | ||
122 | } | ||
123 | |||
124 | static void trace_bts_at(struct trace_array *tr, | ||
125 | const struct bts_trace *trace, void *at) | ||
126 | { | ||
127 | struct bts_struct bts; | ||
128 | int err = 0; | ||
129 | |||
130 | WARN_ON_ONCE(!trace->read); | ||
131 | if (!trace->read) | ||
132 | return; | ||
133 | |||
134 | err = trace->read(this_tracer, at, &bts); | ||
135 | if (err < 0) | ||
136 | return; | ||
137 | |||
138 | switch (bts.qualifier) { | ||
139 | case BTS_BRANCH: | ||
140 | trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to); | ||
141 | break; | ||
142 | } | ||
143 | } | ||
144 | |||
145 | static void trace_bts_cpu(void *arg) | ||
146 | { | ||
147 | struct trace_array *tr = (struct trace_array *) arg; | ||
148 | const struct bts_trace *trace; | ||
149 | unsigned char *at; | ||
150 | |||
151 | if (!this_tracer) | ||
152 | return; | ||
153 | |||
154 | ds_suspend_bts(this_tracer); | ||
155 | trace = ds_read_bts(this_tracer); | ||
156 | if (!trace) | ||
157 | goto out; | ||
158 | |||
159 | for (at = trace->ds.top; (void *)at < trace->ds.end; | ||
160 | at += trace->ds.size) | ||
161 | trace_bts_at(tr, trace, at); | ||
162 | |||
163 | for (at = trace->ds.begin; (void *)at < trace->ds.top; | ||
164 | at += trace->ds.size) | ||
165 | trace_bts_at(tr, trace, at); | ||
166 | |||
167 | out: | ||
168 | ds_resume_bts(this_tracer); | ||
169 | } | ||
170 | |||
171 | static void trace_bts_prepare(struct trace_iterator *iter) | ||
172 | { | ||
173 | int cpu; | ||
174 | |||
175 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
176 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); | ||
177 | } | ||
178 | |||
179 | struct tracer bts_tracer __read_mostly = | ||
180 | { | ||
181 | .name = "hw-branch-tracer", | ||
182 | .init = bts_trace_init, | ||
183 | .reset = bts_trace_stop, | ||
184 | .print_header = bts_trace_print_header, | ||
185 | .print_line = bts_trace_print_line, | ||
186 | .start = bts_trace_start, | ||
187 | .stop = bts_trace_stop, | ||
188 | .open = trace_bts_prepare | ||
189 | }; | ||
190 | |||
191 | __init static int init_bts_trace(void) | ||
192 | { | ||
193 | return register_tracer(&bts_tracer); | ||
194 | } | ||
195 | device_initcall(init_bts_trace); | ||
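trace_bts_cpu() above treats the BTS area as a ring: records from the write cursor (ds.top) to ds.end are the older ones left over after a wrap, while begin..top holds the newer ones, so the two loops together walk the records roughly oldest-first. The traversal in isolation, as a sketch with a hypothetical visitor callback:

/* Sketch: oldest-first walk of a wrapped BTS buffer, assuming the
 * ds.begin/ds.top/ds.end/ds.size layout used by trace_bts_cpu() above. */
static void walk_bts_ring(const struct bts_trace *trace,
			  void (*visit)(const struct bts_trace *, void *))
{
	unsigned char *at;

	/* records behind the write cursor survived the last wrap: oldest */
	for (at = trace->ds.top; (void *)at < trace->ds.end; at += trace->ds.size)
		visit(trace, at);
	/* records in front of the cursor were written most recently */
	for (at = trace->ds.begin; (void *)at < trace->ds.top; at += trace->ds.size)
		visit(trace, at);
}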
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 9c74071c10e0..7c2e326bbc8b 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -353,15 +353,28 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) | |||
353 | } | 353 | } |
354 | #endif /* CONFIG_PREEMPT_TRACER */ | 354 | #endif /* CONFIG_PREEMPT_TRACER */ |
355 | 355 | ||
356 | /* | ||
357 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
358 | * variable when we disable it when we open a trace output file. | ||
359 | */ | ||
360 | static int save_tracer_enabled; | ||
361 | |||
356 | static void start_irqsoff_tracer(struct trace_array *tr) | 362 | static void start_irqsoff_tracer(struct trace_array *tr) |
357 | { | 363 | { |
358 | register_ftrace_function(&trace_ops); | 364 | register_ftrace_function(&trace_ops); |
359 | tracer_enabled = 1; | 365 | if (tracing_is_enabled()) { |
366 | tracer_enabled = 1; | ||
367 | save_tracer_enabled = 1; | ||
368 | } else { | ||
369 | tracer_enabled = 0; | ||
370 | save_tracer_enabled = 0; | ||
371 | } | ||
360 | } | 372 | } |
361 | 373 | ||
362 | static void stop_irqsoff_tracer(struct trace_array *tr) | 374 | static void stop_irqsoff_tracer(struct trace_array *tr) |
363 | { | 375 | { |
364 | tracer_enabled = 0; | 376 | tracer_enabled = 0; |
377 | save_tracer_enabled = 0; | ||
365 | unregister_ftrace_function(&trace_ops); | 378 | unregister_ftrace_function(&trace_ops); |
366 | } | 379 | } |
367 | 380 | ||
@@ -370,53 +383,55 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
370 | irqsoff_trace = tr; | 383 | irqsoff_trace = tr; |
371 | /* make sure that the tracer is visible */ | 384 | /* make sure that the tracer is visible */ |
372 | smp_wmb(); | 385 | smp_wmb(); |
373 | 386 | start_irqsoff_tracer(tr); | |
374 | if (tr->ctrl) | ||
375 | start_irqsoff_tracer(tr); | ||
376 | } | 387 | } |
377 | 388 | ||
378 | static void irqsoff_tracer_reset(struct trace_array *tr) | 389 | static void irqsoff_tracer_reset(struct trace_array *tr) |
379 | { | 390 | { |
380 | if (tr->ctrl) | 391 | stop_irqsoff_tracer(tr); |
381 | stop_irqsoff_tracer(tr); | ||
382 | } | 392 | } |
383 | 393 | ||
384 | static void irqsoff_tracer_ctrl_update(struct trace_array *tr) | 394 | static void irqsoff_tracer_start(struct trace_array *tr) |
385 | { | 395 | { |
386 | if (tr->ctrl) | 396 | tracer_enabled = 1; |
387 | start_irqsoff_tracer(tr); | 397 | save_tracer_enabled = 1; |
388 | else | 398 | } |
389 | stop_irqsoff_tracer(tr); | 399 | |
400 | static void irqsoff_tracer_stop(struct trace_array *tr) | ||
401 | { | ||
402 | tracer_enabled = 0; | ||
403 | save_tracer_enabled = 0; | ||
390 | } | 404 | } |
391 | 405 | ||
392 | static void irqsoff_tracer_open(struct trace_iterator *iter) | 406 | static void irqsoff_tracer_open(struct trace_iterator *iter) |
393 | { | 407 | { |
394 | /* stop the trace while dumping */ | 408 | /* stop the trace while dumping */ |
395 | if (iter->tr->ctrl) | 409 | tracer_enabled = 0; |
396 | stop_irqsoff_tracer(iter->tr); | ||
397 | } | 410 | } |
398 | 411 | ||
399 | static void irqsoff_tracer_close(struct trace_iterator *iter) | 412 | static void irqsoff_tracer_close(struct trace_iterator *iter) |
400 | { | 413 | { |
401 | if (iter->tr->ctrl) | 414 | /* restart tracing */ |
402 | start_irqsoff_tracer(iter->tr); | 415 | tracer_enabled = save_tracer_enabled; |
403 | } | 416 | } |
404 | 417 | ||
405 | #ifdef CONFIG_IRQSOFF_TRACER | 418 | #ifdef CONFIG_IRQSOFF_TRACER |
406 | static void irqsoff_tracer_init(struct trace_array *tr) | 419 | static int irqsoff_tracer_init(struct trace_array *tr) |
407 | { | 420 | { |
408 | trace_type = TRACER_IRQS_OFF; | 421 | trace_type = TRACER_IRQS_OFF; |
409 | 422 | ||
410 | __irqsoff_tracer_init(tr); | 423 | __irqsoff_tracer_init(tr); |
424 | return 0; | ||
411 | } | 425 | } |
412 | static struct tracer irqsoff_tracer __read_mostly = | 426 | static struct tracer irqsoff_tracer __read_mostly = |
413 | { | 427 | { |
414 | .name = "irqsoff", | 428 | .name = "irqsoff", |
415 | .init = irqsoff_tracer_init, | 429 | .init = irqsoff_tracer_init, |
416 | .reset = irqsoff_tracer_reset, | 430 | .reset = irqsoff_tracer_reset, |
431 | .start = irqsoff_tracer_start, | ||
432 | .stop = irqsoff_tracer_stop, | ||
417 | .open = irqsoff_tracer_open, | 433 | .open = irqsoff_tracer_open, |
418 | .close = irqsoff_tracer_close, | 434 | .close = irqsoff_tracer_close, |
419 | .ctrl_update = irqsoff_tracer_ctrl_update, | ||
420 | .print_max = 1, | 435 | .print_max = 1, |
421 | #ifdef CONFIG_FTRACE_SELFTEST | 436 | #ifdef CONFIG_FTRACE_SELFTEST |
422 | .selftest = trace_selftest_startup_irqsoff, | 437 | .selftest = trace_selftest_startup_irqsoff, |
@@ -428,11 +443,12 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
428 | #endif | 443 | #endif |
429 | 444 | ||
430 | #ifdef CONFIG_PREEMPT_TRACER | 445 | #ifdef CONFIG_PREEMPT_TRACER |
431 | static void preemptoff_tracer_init(struct trace_array *tr) | 446 | static int preemptoff_tracer_init(struct trace_array *tr) |
432 | { | 447 | { |
433 | trace_type = TRACER_PREEMPT_OFF; | 448 | trace_type = TRACER_PREEMPT_OFF; |
434 | 449 | ||
435 | __irqsoff_tracer_init(tr); | 450 | __irqsoff_tracer_init(tr); |
451 | return 0; | ||
436 | } | 452 | } |
437 | 453 | ||
438 | static struct tracer preemptoff_tracer __read_mostly = | 454 | static struct tracer preemptoff_tracer __read_mostly = |
@@ -440,9 +456,10 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
440 | .name = "preemptoff", | 456 | .name = "preemptoff", |
441 | .init = preemptoff_tracer_init, | 457 | .init = preemptoff_tracer_init, |
442 | .reset = irqsoff_tracer_reset, | 458 | .reset = irqsoff_tracer_reset, |
459 | .start = irqsoff_tracer_start, | ||
460 | .stop = irqsoff_tracer_stop, | ||
443 | .open = irqsoff_tracer_open, | 461 | .open = irqsoff_tracer_open, |
444 | .close = irqsoff_tracer_close, | 462 | .close = irqsoff_tracer_close, |
445 | .ctrl_update = irqsoff_tracer_ctrl_update, | ||
446 | .print_max = 1, | 463 | .print_max = 1, |
447 | #ifdef CONFIG_FTRACE_SELFTEST | 464 | #ifdef CONFIG_FTRACE_SELFTEST |
448 | .selftest = trace_selftest_startup_preemptoff, | 465 | .selftest = trace_selftest_startup_preemptoff, |
@@ -456,11 +473,12 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
456 | #if defined(CONFIG_IRQSOFF_TRACER) && \ | 473 | #if defined(CONFIG_IRQSOFF_TRACER) && \ |
457 | defined(CONFIG_PREEMPT_TRACER) | 474 | defined(CONFIG_PREEMPT_TRACER) |
458 | 475 | ||
459 | static void preemptirqsoff_tracer_init(struct trace_array *tr) | 476 | static int preemptirqsoff_tracer_init(struct trace_array *tr) |
460 | { | 477 | { |
461 | trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; | 478 | trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF; |
462 | 479 | ||
463 | __irqsoff_tracer_init(tr); | 480 | __irqsoff_tracer_init(tr); |
481 | return 0; | ||
464 | } | 482 | } |
465 | 483 | ||
466 | static struct tracer preemptirqsoff_tracer __read_mostly = | 484 | static struct tracer preemptirqsoff_tracer __read_mostly = |
@@ -468,9 +486,10 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
468 | .name = "preemptirqsoff", | 486 | .name = "preemptirqsoff", |
469 | .init = preemptirqsoff_tracer_init, | 487 | .init = preemptirqsoff_tracer_init, |
470 | .reset = irqsoff_tracer_reset, | 488 | .reset = irqsoff_tracer_reset, |
489 | .start = irqsoff_tracer_start, | ||
490 | .stop = irqsoff_tracer_stop, | ||
471 | .open = irqsoff_tracer_open, | 491 | .open = irqsoff_tracer_open, |
472 | .close = irqsoff_tracer_close, | 492 | .close = irqsoff_tracer_close, |
473 | .ctrl_update = irqsoff_tracer_ctrl_update, | ||
474 | .print_max = 1, | 493 | .print_max = 1, |
475 | #ifdef CONFIG_FTRACE_SELFTEST | 494 | #ifdef CONFIG_FTRACE_SELFTEST |
476 | .selftest = trace_selftest_startup_preemptirqsoff, | 495 | .selftest = trace_selftest_startup_preemptirqsoff, |
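The conversion above replaces the single ctrl_update callback with explicit start/stop/open/close hooks; save_tracer_enabled exists so that opening a trace output file can pause recording and closing it can restore exactly the state that start/stop last established. The contract, reduced to a sketch:

/* Sketch of the pause/resume contract behind the irqsoff hooks above. */
static int tracer_enabled;	/* probes record only while this is set */
static int save_tracer_enabled;	/* state to restore after a dump */

static void tracer_open_pause(void)
{
	tracer_enabled = 0;			/* quiesce while dumping */
}

static void tracer_close_resume(void)
{
	tracer_enabled = save_tracer_enabled;	/* whatever start/stop set */
}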
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index e62cbf78eab6..fffcb069f1dc 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -22,44 +22,35 @@ static unsigned long prev_overruns; | |||
22 | 22 | ||
23 | static void mmio_reset_data(struct trace_array *tr) | 23 | static void mmio_reset_data(struct trace_array *tr) |
24 | { | 24 | { |
25 | int cpu; | ||
26 | |||
27 | overrun_detected = false; | 25 | overrun_detected = false; |
28 | prev_overruns = 0; | 26 | prev_overruns = 0; |
29 | tr->time_start = ftrace_now(tr->cpu); | ||
30 | 27 | ||
31 | for_each_online_cpu(cpu) | 28 | tracing_reset_online_cpus(tr); |
32 | tracing_reset(tr, cpu); | ||
33 | } | 29 | } |
34 | 30 | ||
35 | static void mmio_trace_init(struct trace_array *tr) | 31 | static int mmio_trace_init(struct trace_array *tr) |
36 | { | 32 | { |
37 | pr_debug("in %s\n", __func__); | 33 | pr_debug("in %s\n", __func__); |
38 | mmio_trace_array = tr; | 34 | mmio_trace_array = tr; |
39 | if (tr->ctrl) { | 35 | |
40 | mmio_reset_data(tr); | 36 | mmio_reset_data(tr); |
41 | enable_mmiotrace(); | 37 | enable_mmiotrace(); |
42 | } | 38 | return 0; |
43 | } | 39 | } |
44 | 40 | ||
45 | static void mmio_trace_reset(struct trace_array *tr) | 41 | static void mmio_trace_reset(struct trace_array *tr) |
46 | { | 42 | { |
47 | pr_debug("in %s\n", __func__); | 43 | pr_debug("in %s\n", __func__); |
48 | if (tr->ctrl) | 44 | |
49 | disable_mmiotrace(); | 45 | disable_mmiotrace(); |
50 | mmio_reset_data(tr); | 46 | mmio_reset_data(tr); |
51 | mmio_trace_array = NULL; | 47 | mmio_trace_array = NULL; |
52 | } | 48 | } |
53 | 49 | ||
54 | static void mmio_trace_ctrl_update(struct trace_array *tr) | 50 | static void mmio_trace_start(struct trace_array *tr) |
55 | { | 51 | { |
56 | pr_debug("in %s\n", __func__); | 52 | pr_debug("in %s\n", __func__); |
57 | if (tr->ctrl) { | 53 | mmio_reset_data(tr); |
58 | mmio_reset_data(tr); | ||
59 | enable_mmiotrace(); | ||
60 | } else { | ||
61 | disable_mmiotrace(); | ||
62 | } | ||
63 | } | 54 | } |
64 | 55 | ||
65 | static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | 56 | static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) |
@@ -296,10 +287,10 @@ static struct tracer mmio_tracer __read_mostly = | |||
296 | .name = "mmiotrace", | 287 | .name = "mmiotrace", |
297 | .init = mmio_trace_init, | 288 | .init = mmio_trace_init, |
298 | .reset = mmio_trace_reset, | 289 | .reset = mmio_trace_reset, |
290 | .start = mmio_trace_start, | ||
299 | .pipe_open = mmio_pipe_open, | 291 | .pipe_open = mmio_pipe_open, |
300 | .close = mmio_close, | 292 | .close = mmio_close, |
301 | .read = mmio_read, | 293 | .read = mmio_read, |
302 | .ctrl_update = mmio_trace_ctrl_update, | ||
303 | .print_line = mmio_print_line, | 294 | .print_line = mmio_print_line, |
304 | }; | 295 | }; |
305 | 296 | ||
@@ -371,5 +362,5 @@ void mmio_trace_mapping(struct mmiotrace_map *map) | |||
371 | 362 | ||
372 | int mmio_trace_printk(const char *fmt, va_list args) | 363 | int mmio_trace_printk(const char *fmt, va_list args) |
373 | { | 364 | { |
374 | return trace_vprintk(0, fmt, args); | 365 | return trace_vprintk(0, -1, fmt, args); |
375 | } | 366 | } |
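mmio_reset_data() now defers to the shared tracing_reset_online_cpus() helper instead of open-coding the per-CPU loop. Judging from the loop removed here (and the identical one removed from trace_sched_switch.c below), the helper plausibly amounts to:

/* Sketch of tracing_reset_online_cpus(), reconstructed from the removed
 * open-coded loops; the real definition lives in kernel/trace/trace.c. */
void tracing_reset_online_cpus(struct trace_array *tr)
{
	int cpu;

	tr->time_start = ftrace_now(tr->cpu);
	for_each_online_cpu(cpu)
		tracing_reset(tr, cpu);
}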
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index 4592b4862515..b9767acd30ac 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
@@ -12,6 +12,27 @@ | |||
12 | 12 | ||
13 | #include "trace.h" | 13 | #include "trace.h" |
14 | 14 | ||
15 | /* Our two options */ | ||
16 | enum { | ||
17 | TRACE_NOP_OPT_ACCEPT = 0x1, | ||
18 | TRACE_NOP_OPT_REFUSE = 0x2 | ||
19 | }; | ||
20 | |||
21 | /* Options for the tracer (see trace_options file) */ | ||
22 | static struct tracer_opt nop_opts[] = { | ||
23 | /* Option that will be accepted by set_flag callback */ | ||
24 | { TRACER_OPT(test_nop_accept, TRACE_NOP_OPT_ACCEPT) }, | ||
25 | /* Option that will be refused by set_flag callback */ | ||
26 | { TRACER_OPT(test_nop_refuse, TRACE_NOP_OPT_REFUSE) }, | ||
27 | { } /* Always set a last empty entry */ | ||
28 | }; | ||
29 | |||
30 | static struct tracer_flags nop_flags = { | ||
31 | /* You can check your flags value here whenever you want. */ | ||
32 | .val = 0, /* By default: all flags disabled */ | ||
33 | .opts = nop_opts | ||
34 | }; | ||
35 | |||
15 | static struct trace_array *ctx_trace; | 36 | static struct trace_array *ctx_trace; |
16 | 37 | ||
17 | static void start_nop_trace(struct trace_array *tr) | 38 | static void start_nop_trace(struct trace_array *tr) |
@@ -24,7 +45,7 @@ static void stop_nop_trace(struct trace_array *tr) | |||
24 | /* Nothing to do! */ | 45 | /* Nothing to do! */ |
25 | } | 46 | } |
26 | 47 | ||
27 | static void nop_trace_init(struct trace_array *tr) | 48 | static int nop_trace_init(struct trace_array *tr) |
28 | { | 49 | { |
29 | int cpu; | 50 | int cpu; |
30 | ctx_trace = tr; | 51 | ctx_trace = tr; |
@@ -32,33 +53,53 @@ static void nop_trace_init(struct trace_array *tr) | |||
32 | for_each_online_cpu(cpu) | 53 | for_each_online_cpu(cpu) |
33 | tracing_reset(tr, cpu); | 54 | tracing_reset(tr, cpu); |
34 | 55 | ||
35 | if (tr->ctrl) | 56 | start_nop_trace(tr); |
36 | start_nop_trace(tr); | 57 | return 0; |
37 | } | 58 | } |
38 | 59 | ||
39 | static void nop_trace_reset(struct trace_array *tr) | 60 | static void nop_trace_reset(struct trace_array *tr) |
40 | { | 61 | { |
41 | if (tr->ctrl) | 62 | stop_nop_trace(tr); |
42 | stop_nop_trace(tr); | ||
43 | } | 63 | } |
44 | 64 | ||
45 | static void nop_trace_ctrl_update(struct trace_array *tr) | 65 | /* This only serves as a callback to accept or |
66 | * refuse the setting of a flag. | ||
67 | * If you don't implement it, then the flag setting will be | ||
68 | * automatically accepted. | ||
69 | */ | ||
70 | static int nop_set_flag(u32 old_flags, u32 bit, int set) | ||
46 | { | 71 | { |
47 | /* When starting a new trace, reset the buffers */ | 72 | /* |
48 | if (tr->ctrl) | 73 | * Note that you don't need to update nop_flags.val yourself. |
49 | start_nop_trace(tr); | 74 | * The tracing API will do it automatically if you return 0. |
50 | else | 75 | */ |
51 | stop_nop_trace(tr); | 76 | if (bit == TRACE_NOP_OPT_ACCEPT) { |
77 | printk(KERN_DEBUG "nop_test_accept flag set to %d: we accept." | ||
78 | " Now cat trace_options to see the result\n", | ||
79 | set); | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | if (bit == TRACE_NOP_OPT_REFUSE) { | ||
84 | printk(KERN_DEBUG "nop_test_refuse flag set to %d: we refuse." | ||
85 | "Now cat trace_options to see the result\n", | ||
86 | set); | ||
87 | return -EINVAL; | ||
88 | } | ||
89 | |||
90 | return 0; | ||
52 | } | 91 | } |
53 | 92 | ||
93 | |||
54 | struct tracer nop_trace __read_mostly = | 94 | struct tracer nop_trace __read_mostly = |
55 | { | 95 | { |
56 | .name = "nop", | 96 | .name = "nop", |
57 | .init = nop_trace_init, | 97 | .init = nop_trace_init, |
58 | .reset = nop_trace_reset, | 98 | .reset = nop_trace_reset, |
59 | .ctrl_update = nop_trace_ctrl_update, | ||
60 | #ifdef CONFIG_FTRACE_SELFTEST | 99 | #ifdef CONFIG_FTRACE_SELFTEST |
61 | .selftest = trace_selftest_startup_nop, | 100 | .selftest = trace_selftest_startup_nop, |
62 | #endif | 101 | #endif |
102 | .flags = &nop_flags, | ||
103 | .set_flag = nop_set_flag | ||
63 | }; | 104 | }; |
64 | 105 | ||
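The two nop options exist purely to exercise the flag machinery: set_flag() returning 0 means the core accepts the bit and updates nop_flags.val itself, while a negative return (here -EINVAL for test_nop_refuse) rejects the write to trace_options. A hedged sketch of the core-side dispatch this relies on (simplified; the real logic lives in kernel/trace/trace.c):

/* Sketch: how the core might apply a tracer flag, honoring set_flag().
 * apply_tracer_flag() is a hypothetical name for illustration. */
static int apply_tracer_flag(struct tracer *t, u32 bit, int set)
{
	int ret = 0;

	if (t->set_flag)
		ret = t->set_flag(t->flags->val, bit, set);
	if (ret)
		return ret;		/* refused, e.g. -EINVAL from nop_set_flag() */

	if (set)
		t->flags->val |= bit;	/* accepted: the core updates the value */
	else
		t->flags->val &= ~bit;
	return 0;
}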
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c new file mode 100644 index 000000000000..a7172a352f62 --- /dev/null +++ b/kernel/trace/trace_power.c | |||
@@ -0,0 +1,179 @@ | |||
1 | /* | ||
2 | * ring buffer based C-state tracer | ||
3 | * | ||
4 | * Arjan van de Ven <arjan@linux.intel.com> | ||
5 | * Copyright (C) 2008 Intel Corporation | ||
6 | * | ||
7 | * Much is borrowed from trace_boot.c which is | ||
8 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <linux/init.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/ftrace.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/module.h> | ||
17 | |||
18 | #include "trace.h" | ||
19 | |||
20 | static struct trace_array *power_trace; | ||
21 | static int __read_mostly trace_power_enabled; | ||
22 | |||
23 | |||
24 | static void start_power_trace(struct trace_array *tr) | ||
25 | { | ||
26 | trace_power_enabled = 1; | ||
27 | } | ||
28 | |||
29 | static void stop_power_trace(struct trace_array *tr) | ||
30 | { | ||
31 | trace_power_enabled = 0; | ||
32 | } | ||
33 | |||
34 | |||
35 | static int power_trace_init(struct trace_array *tr) | ||
36 | { | ||
37 | int cpu; | ||
38 | power_trace = tr; | ||
39 | |||
40 | trace_power_enabled = 1; | ||
41 | |||
42 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
43 | tracing_reset(tr, cpu); | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static enum print_line_t power_print_line(struct trace_iterator *iter) | ||
48 | { | ||
49 | int ret = 0; | ||
50 | struct trace_entry *entry = iter->ent; | ||
51 | struct trace_power *field; | ||
52 | struct power_trace *it; | ||
53 | struct trace_seq *s = &iter->seq; | ||
54 | struct timespec stamp; | ||
55 | struct timespec duration; | ||
56 | |||
57 | trace_assign_type(field, entry); | ||
58 | it = &field->state_data; | ||
59 | stamp = ktime_to_timespec(it->stamp); | ||
60 | duration = ktime_to_timespec(ktime_sub(it->end, it->stamp)); | ||
61 | |||
62 | if (entry->type == TRACE_POWER) { | ||
63 | if (it->type == POWER_CSTATE) | ||
64 | ret = trace_seq_printf(s, "[%5ld.%09ld] CSTATE: Going to C%i on cpu %i for %ld.%09ld\n", | ||
65 | stamp.tv_sec, | ||
66 | stamp.tv_nsec, | ||
67 | it->state, iter->cpu, | ||
68 | duration.tv_sec, | ||
69 | duration.tv_nsec); | ||
70 | if (it->type == POWER_PSTATE) | ||
71 | ret = trace_seq_printf(s, "[%5ld.%09ld] PSTATE: Going to P%i on cpu %i\n", | ||
72 | stamp.tv_sec, | ||
73 | stamp.tv_nsec, | ||
74 | it->state, iter->cpu); | ||
75 | if (!ret) | ||
76 | return TRACE_TYPE_PARTIAL_LINE; | ||
77 | return TRACE_TYPE_HANDLED; | ||
78 | } | ||
79 | return TRACE_TYPE_UNHANDLED; | ||
80 | } | ||
81 | |||
82 | static struct tracer power_tracer __read_mostly = | ||
83 | { | ||
84 | .name = "power", | ||
85 | .init = power_trace_init, | ||
86 | .start = start_power_trace, | ||
87 | .stop = stop_power_trace, | ||
88 | .reset = stop_power_trace, | ||
89 | .print_line = power_print_line, | ||
90 | }; | ||
91 | |||
92 | static int init_power_trace(void) | ||
93 | { | ||
94 | return register_tracer(&power_tracer); | ||
95 | } | ||
96 | device_initcall(init_power_trace); | ||
97 | |||
98 | void trace_power_start(struct power_trace *it, unsigned int type, | ||
99 | unsigned int level) | ||
100 | { | ||
101 | if (!trace_power_enabled) | ||
102 | return; | ||
103 | |||
104 | memset(it, 0, sizeof(struct power_trace)); | ||
105 | it->state = level; | ||
106 | it->type = type; | ||
107 | it->stamp = ktime_get(); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(trace_power_start); | ||
110 | |||
111 | |||
112 | void trace_power_end(struct power_trace *it) | ||
113 | { | ||
114 | struct ring_buffer_event *event; | ||
115 | struct trace_power *entry; | ||
116 | struct trace_array_cpu *data; | ||
117 | unsigned long irq_flags; | ||
118 | struct trace_array *tr = power_trace; | ||
119 | |||
120 | if (!trace_power_enabled) | ||
121 | return; | ||
122 | |||
123 | preempt_disable(); | ||
124 | it->end = ktime_get(); | ||
125 | data = tr->data[smp_processor_id()]; | ||
126 | |||
127 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
128 | &irq_flags); | ||
129 | if (!event) | ||
130 | goto out; | ||
131 | entry = ring_buffer_event_data(event); | ||
132 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
133 | entry->ent.type = TRACE_POWER; | ||
134 | entry->state_data = *it; | ||
135 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
136 | |||
137 | trace_wake_up(); | ||
138 | |||
139 | out: | ||
140 | preempt_enable(); | ||
141 | } | ||
142 | EXPORT_SYMBOL_GPL(trace_power_end); | ||
143 | |||
144 | void trace_power_mark(struct power_trace *it, unsigned int type, | ||
145 | unsigned int level) | ||
146 | { | ||
147 | struct ring_buffer_event *event; | ||
148 | struct trace_power *entry; | ||
149 | struct trace_array_cpu *data; | ||
150 | unsigned long irq_flags; | ||
151 | struct trace_array *tr = power_trace; | ||
152 | |||
153 | if (!trace_power_enabled) | ||
154 | return; | ||
155 | |||
156 | memset(it, 0, sizeof(struct power_trace)); | ||
157 | it->state = level; | ||
158 | it->type = type; | ||
159 | it->stamp = ktime_get(); | ||
160 | preempt_disable(); | ||
161 | it->end = it->stamp; | ||
162 | data = tr->data[smp_processor_id()]; | ||
163 | |||
164 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
165 | &irq_flags); | ||
166 | if (!event) | ||
167 | goto out; | ||
168 | entry = ring_buffer_event_data(event); | ||
169 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
170 | entry->ent.type = TRACE_POWER; | ||
171 | entry->state_data = *it; | ||
172 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
173 | |||
174 | trace_wake_up(); | ||
175 | |||
176 | out: | ||
177 | preempt_enable(); | ||
178 | } | ||
179 | EXPORT_SYMBOL_GPL(trace_power_mark); | ||
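trace_power_start() and trace_power_end() are designed to bracket a residency period so the tracer can derive the dwell time from stamp and end, while trace_power_mark() logs a point event such as a P-state change. A hedged usage sketch from a hypothetical idle path:

/* Sketch: instrumenting a hypothetical C-state entry with the API above. */
static void my_enter_cstate(unsigned int state)
{
	struct power_trace it;

	trace_power_start(&it, POWER_CSTATE, state);	/* records entry stamp */
	do_low_power_idle(state);	/* hypothetical platform idle routine */
	trace_power_end(&it);		/* records exit and logs the event */
}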
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index b8f56beb1a62..df175cb4564f 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -16,7 +16,8 @@ | |||
16 | 16 | ||
17 | static struct trace_array *ctx_trace; | 17 | static struct trace_array *ctx_trace; |
18 | static int __read_mostly tracer_enabled; | 18 | static int __read_mostly tracer_enabled; |
19 | static atomic_t sched_ref; | 19 | static int sched_ref; |
20 | static DEFINE_MUTEX(sched_register_mutex); | ||
20 | 21 | ||
21 | static void | 22 | static void |
22 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, | 23 | probe_sched_switch(struct rq *__rq, struct task_struct *prev, |
@@ -27,7 +28,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
27 | int cpu; | 28 | int cpu; |
28 | int pc; | 29 | int pc; |
29 | 30 | ||
30 | if (!atomic_read(&sched_ref)) | 31 | if (!sched_ref) |
31 | return; | 32 | return; |
32 | 33 | ||
33 | tracing_record_cmdline(prev); | 34 | tracing_record_cmdline(prev); |
@@ -48,7 +49,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
48 | } | 49 | } |
49 | 50 | ||
50 | static void | 51 | static void |
51 | probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) | 52 | probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) |
52 | { | 53 | { |
53 | struct trace_array_cpu *data; | 54 | struct trace_array_cpu *data; |
54 | unsigned long flags; | 55 | unsigned long flags; |
@@ -71,16 +72,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee) | |||
71 | local_irq_restore(flags); | 72 | local_irq_restore(flags); |
72 | } | 73 | } |
73 | 74 | ||
74 | static void sched_switch_reset(struct trace_array *tr) | ||
75 | { | ||
76 | int cpu; | ||
77 | |||
78 | tr->time_start = ftrace_now(tr->cpu); | ||
79 | |||
80 | for_each_online_cpu(cpu) | ||
81 | tracing_reset(tr, cpu); | ||
82 | } | ||
83 | |||
84 | static int tracing_sched_register(void) | 75 | static int tracing_sched_register(void) |
85 | { | 76 | { |
86 | int ret; | 77 | int ret; |
@@ -123,20 +114,18 @@ static void tracing_sched_unregister(void) | |||
123 | 114 | ||
124 | static void tracing_start_sched_switch(void) | 115 | static void tracing_start_sched_switch(void) |
125 | { | 116 | { |
126 | long ref; | 117 | mutex_lock(&sched_register_mutex); |
127 | 118 | if (!(sched_ref++)) | |
128 | ref = atomic_inc_return(&sched_ref); | ||
129 | if (ref == 1) | ||
130 | tracing_sched_register(); | 119 | tracing_sched_register(); |
120 | mutex_unlock(&sched_register_mutex); | ||
131 | } | 121 | } |
132 | 122 | ||
133 | static void tracing_stop_sched_switch(void) | 123 | static void tracing_stop_sched_switch(void) |
134 | { | 124 | { |
135 | long ref; | 125 | mutex_lock(&sched_register_mutex); |
136 | 126 | if (!(--sched_ref)) | |
137 | ref = atomic_dec_and_test(&sched_ref); | ||
138 | if (ref) | ||
139 | tracing_sched_unregister(); | 127 | tracing_sched_unregister(); |
128 | mutex_unlock(&sched_register_mutex); | ||
140 | } | 129 | } |
141 | 130 | ||
142 | void tracing_start_cmdline_record(void) | 131 | void tracing_start_cmdline_record(void) |
@@ -149,40 +138,86 @@ void tracing_stop_cmdline_record(void) | |||
149 | tracing_stop_sched_switch(); | 138 | tracing_stop_sched_switch(); |
150 | } | 139 | } |
151 | 140 | ||
141 | /** | ||
142 | * tracing_start_sched_switch_record - start tracing context switches | ||
143 | * | ||
144 | * Turns on context switch tracing for a tracer. | ||
145 | */ | ||
146 | void tracing_start_sched_switch_record(void) | ||
147 | { | ||
148 | if (unlikely(!ctx_trace)) { | ||
149 | WARN_ON(1); | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | tracing_start_sched_switch(); | ||
154 | |||
155 | mutex_lock(&sched_register_mutex); | ||
156 | tracer_enabled++; | ||
157 | mutex_unlock(&sched_register_mutex); | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * tracing_stop_sched_switch_record - stop tracing context switches | ||
162 | * | ||
163 | * Turns off context switch tracing for a tracer. | ||
164 | */ | ||
165 | void tracing_stop_sched_switch_record(void) | ||
166 | { | ||
167 | mutex_lock(&sched_register_mutex); | ||
168 | tracer_enabled--; | ||
169 | WARN_ON(tracer_enabled < 0); | ||
170 | mutex_unlock(&sched_register_mutex); | ||
171 | |||
172 | tracing_stop_sched_switch(); | ||
173 | } | ||
174 | |||
175 | /** | ||
176 | * tracing_sched_switch_assign_trace - assign a trace array for ctx switch | ||
177 | * @tr: trace array pointer to assign | ||
178 | * | ||
179 | * Some tracers might want to record the context switches in their | ||
180 | * trace. This function lets those tracers assign the trace array | ||
181 | * to use. | ||
182 | */ | ||
183 | void tracing_sched_switch_assign_trace(struct trace_array *tr) | ||
184 | { | ||
185 | ctx_trace = tr; | ||
186 | } | ||
187 | |||
152 | static void start_sched_trace(struct trace_array *tr) | 188 | static void start_sched_trace(struct trace_array *tr) |
153 | { | 189 | { |
154 | sched_switch_reset(tr); | 190 | tracing_reset_online_cpus(tr); |
155 | tracing_start_cmdline_record(); | 191 | tracing_start_sched_switch_record(); |
156 | tracer_enabled = 1; | ||
157 | } | 192 | } |
158 | 193 | ||
159 | static void stop_sched_trace(struct trace_array *tr) | 194 | static void stop_sched_trace(struct trace_array *tr) |
160 | { | 195 | { |
161 | tracer_enabled = 0; | 196 | tracing_stop_sched_switch_record(); |
162 | tracing_stop_cmdline_record(); | ||
163 | } | 197 | } |
164 | 198 | ||
165 | static void sched_switch_trace_init(struct trace_array *tr) | 199 | static int sched_switch_trace_init(struct trace_array *tr) |
166 | { | 200 | { |
167 | ctx_trace = tr; | 201 | ctx_trace = tr; |
168 | 202 | start_sched_trace(tr); | |
169 | if (tr->ctrl) | 203 | return 0; |
170 | start_sched_trace(tr); | ||
171 | } | 204 | } |
172 | 205 | ||
173 | static void sched_switch_trace_reset(struct trace_array *tr) | 206 | static void sched_switch_trace_reset(struct trace_array *tr) |
174 | { | 207 | { |
175 | if (tr->ctrl) | 208 | if (sched_ref) |
176 | stop_sched_trace(tr); | 209 | stop_sched_trace(tr); |
177 | } | 210 | } |
178 | 211 | ||
179 | static void sched_switch_trace_ctrl_update(struct trace_array *tr) | 212 | static void sched_switch_trace_start(struct trace_array *tr) |
180 | { | 213 | { |
181 | /* When starting a new trace, reset the buffers */ | 214 | tracing_reset_online_cpus(tr); |
182 | if (tr->ctrl) | 215 | tracing_start_sched_switch(); |
183 | start_sched_trace(tr); | 216 | } |
184 | else | 217 | |
185 | stop_sched_trace(tr); | 218 | static void sched_switch_trace_stop(struct trace_array *tr) |
219 | { | ||
220 | tracing_stop_sched_switch(); | ||
186 | } | 221 | } |
187 | 222 | ||
188 | static struct tracer sched_switch_trace __read_mostly = | 223 | static struct tracer sched_switch_trace __read_mostly = |
@@ -190,7 +225,8 @@ static struct tracer sched_switch_trace __read_mostly = | |||
190 | .name = "sched_switch", | 225 | .name = "sched_switch", |
191 | .init = sched_switch_trace_init, | 226 | .init = sched_switch_trace_init, |
192 | .reset = sched_switch_trace_reset, | 227 | .reset = sched_switch_trace_reset, |
193 | .ctrl_update = sched_switch_trace_ctrl_update, | 228 | .start = sched_switch_trace_start, |
229 | .stop = sched_switch_trace_stop, | ||
194 | #ifdef CONFIG_FTRACE_SELFTEST | 230 | #ifdef CONFIG_FTRACE_SELFTEST |
195 | .selftest = trace_selftest_startup_sched_switch, | 231 | .selftest = trace_selftest_startup_sched_switch, |
196 | #endif | 232 | #endif |
@@ -198,14 +234,7 @@ static struct tracer sched_switch_trace __read_mostly = | |||
198 | 234 | ||
199 | __init static int init_sched_switch_trace(void) | 235 | __init static int init_sched_switch_trace(void) |
200 | { | 236 | { |
201 | int ret = 0; | ||
202 | |||
203 | if (atomic_read(&sched_ref)) | ||
204 | ret = tracing_sched_register(); | ||
205 | if (ret) { | ||
206 | pr_info("error registering scheduler trace\n"); | ||
207 | return ret; | ||
208 | } | ||
209 | return register_tracer(&sched_switch_trace); | 237 | return register_tracer(&sched_switch_trace); |
210 | } | 238 | } |
211 | device_initcall(init_sched_switch_trace); | 239 | device_initcall(init_sched_switch_trace); |
240 | |||
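Switching sched_ref from an atomic_t to a plain int under sched_register_mutex serializes the whole register/unregister sequence, not just the counter update, so two concurrent callers can no longer race tracing_sched_register() against tracing_sched_unregister(). The pattern in isolation:

#include <linux/mutex.h>

/* Sketch: mutex-guarded refcount so (un)registration runs exactly once,
 * with the whole transition protected, mirroring the change above. */
static int refcnt;
static DEFINE_MUTEX(ref_mutex);

static void get_probes(void)
{
	mutex_lock(&ref_mutex);
	if (!refcnt++)
		register_my_probes();	/* hypothetical: first user registers */
	mutex_unlock(&ref_mutex);
}

static void put_probes(void)
{
	mutex_lock(&ref_mutex);
	if (!--refcnt)
		unregister_my_probes();	/* hypothetical: last user unregisters */
	mutex_unlock(&ref_mutex);
}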
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 3ae93f16b565..43586b689e31 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
50 | return; | 50 | return; |
51 | 51 | ||
52 | pc = preempt_count(); | 52 | pc = preempt_count(); |
53 | resched = need_resched(); | 53 | resched = ftrace_preempt_disable(); |
54 | preempt_disable_notrace(); | ||
55 | 54 | ||
56 | cpu = raw_smp_processor_id(); | 55 | cpu = raw_smp_processor_id(); |
57 | data = tr->data[cpu]; | 56 | data = tr->data[cpu]; |
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
81 | out: | 80 | out: |
82 | atomic_dec(&data->disabled); | 81 | atomic_dec(&data->disabled); |
83 | 82 | ||
84 | /* | 83 | ftrace_preempt_enable(resched); |
85 | * To prevent recursion from the scheduler, if the | ||
86 | * resched flag was set before we entered, then | ||
87 | * don't reschedule. | ||
88 | */ | ||
89 | if (resched) | ||
90 | preempt_enable_no_resched_notrace(); | ||
91 | else | ||
92 | preempt_enable_notrace(); | ||
93 | } | 84 | } |
94 | 85 | ||
95 | static struct ftrace_ops trace_ops __read_mostly = | 86 | static struct ftrace_ops trace_ops __read_mostly = |
@@ -220,7 +211,7 @@ static void wakeup_reset(struct trace_array *tr) | |||
220 | } | 211 | } |
221 | 212 | ||
222 | static void | 213 | static void |
223 | probe_wakeup(struct rq *rq, struct task_struct *p) | 214 | probe_wakeup(struct rq *rq, struct task_struct *p, int success) |
224 | { | 215 | { |
225 | int cpu = smp_processor_id(); | 216 | int cpu = smp_processor_id(); |
226 | unsigned long flags; | 217 | unsigned long flags; |
@@ -271,6 +262,12 @@ out: | |||
271 | atomic_dec(&wakeup_trace->data[cpu]->disabled); | 262 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
272 | } | 263 | } |
273 | 264 | ||
265 | /* | ||
266 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
267 | * variable when we disable it when we open a trace output file. | ||
268 | */ | ||
269 | static int save_tracer_enabled; | ||
270 | |||
274 | static void start_wakeup_tracer(struct trace_array *tr) | 271 | static void start_wakeup_tracer(struct trace_array *tr) |
275 | { | 272 | { |
276 | int ret; | 273 | int ret; |
@@ -309,7 +306,13 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
309 | 306 | ||
310 | register_ftrace_function(&trace_ops); | 307 | register_ftrace_function(&trace_ops); |
311 | 308 | ||
312 | tracer_enabled = 1; | 309 | if (tracing_is_enabled()) { |
310 | tracer_enabled = 1; | ||
311 | save_tracer_enabled = 1; | ||
312 | } else { | ||
313 | tracer_enabled = 0; | ||
314 | save_tracer_enabled = 0; | ||
315 | } | ||
313 | 316 | ||
314 | return; | 317 | return; |
315 | fail_deprobe_wake_new: | 318 | fail_deprobe_wake_new: |
@@ -321,49 +324,53 @@ fail_deprobe: | |||
321 | static void stop_wakeup_tracer(struct trace_array *tr) | 324 | static void stop_wakeup_tracer(struct trace_array *tr) |
322 | { | 325 | { |
323 | tracer_enabled = 0; | 326 | tracer_enabled = 0; |
327 | save_tracer_enabled = 0; | ||
324 | unregister_ftrace_function(&trace_ops); | 328 | unregister_ftrace_function(&trace_ops); |
325 | unregister_trace_sched_switch(probe_wakeup_sched_switch); | 329 | unregister_trace_sched_switch(probe_wakeup_sched_switch); |
326 | unregister_trace_sched_wakeup_new(probe_wakeup); | 330 | unregister_trace_sched_wakeup_new(probe_wakeup); |
327 | unregister_trace_sched_wakeup(probe_wakeup); | 331 | unregister_trace_sched_wakeup(probe_wakeup); |
328 | } | 332 | } |
329 | 333 | ||
330 | static void wakeup_tracer_init(struct trace_array *tr) | 334 | static int wakeup_tracer_init(struct trace_array *tr) |
331 | { | 335 | { |
332 | wakeup_trace = tr; | 336 | wakeup_trace = tr; |
333 | 337 | start_wakeup_tracer(tr); | |
334 | if (tr->ctrl) | 338 | return 0; |
335 | start_wakeup_tracer(tr); | ||
336 | } | 339 | } |
337 | 340 | ||
338 | static void wakeup_tracer_reset(struct trace_array *tr) | 341 | static void wakeup_tracer_reset(struct trace_array *tr) |
339 | { | 342 | { |
340 | if (tr->ctrl) { | 343 | stop_wakeup_tracer(tr); |
341 | stop_wakeup_tracer(tr); | 344 | /* make sure we put back any tasks we are tracing */ |
342 | /* make sure we put back any tasks we are tracing */ | 345 | wakeup_reset(tr); |
343 | wakeup_reset(tr); | 346 | } |
344 | } | 347 | |
348 | static void wakeup_tracer_start(struct trace_array *tr) | ||
349 | { | ||
350 | wakeup_reset(tr); | ||
351 | tracer_enabled = 1; | ||
352 | save_tracer_enabled = 1; | ||
345 | } | 353 | } |
346 | 354 | ||
347 | static void wakeup_tracer_ctrl_update(struct trace_array *tr) | 355 | static void wakeup_tracer_stop(struct trace_array *tr) |
348 | { | 356 | { |
349 | if (tr->ctrl) | 357 | tracer_enabled = 0; |
350 | start_wakeup_tracer(tr); | 358 | save_tracer_enabled = 0; |
351 | else | ||
352 | stop_wakeup_tracer(tr); | ||
353 | } | 359 | } |
354 | 360 | ||
355 | static void wakeup_tracer_open(struct trace_iterator *iter) | 361 | static void wakeup_tracer_open(struct trace_iterator *iter) |
356 | { | 362 | { |
357 | /* stop the trace while dumping */ | 363 | /* stop the trace while dumping */ |
358 | if (iter->tr->ctrl) | 364 | tracer_enabled = 0; |
359 | stop_wakeup_tracer(iter->tr); | ||
360 | } | 365 | } |
361 | 366 | ||
362 | static void wakeup_tracer_close(struct trace_iterator *iter) | 367 | static void wakeup_tracer_close(struct trace_iterator *iter) |
363 | { | 368 | { |
364 | /* forget about any processes we were recording */ | 369 | /* forget about any processes we were recording */ |
365 | if (iter->tr->ctrl) | 370 | if (save_tracer_enabled) { |
366 | start_wakeup_tracer(iter->tr); | 371 | wakeup_reset(iter->tr); |
372 | tracer_enabled = 1; | ||
373 | } | ||
367 | } | 374 | } |
368 | 375 | ||
369 | static struct tracer wakeup_tracer __read_mostly = | 376 | static struct tracer wakeup_tracer __read_mostly = |
@@ -371,9 +378,10 @@ static struct tracer wakeup_tracer __read_mostly = | |||
371 | .name = "wakeup", | 378 | .name = "wakeup", |
372 | .init = wakeup_tracer_init, | 379 | .init = wakeup_tracer_init, |
373 | .reset = wakeup_tracer_reset, | 380 | .reset = wakeup_tracer_reset, |
381 | .start = wakeup_tracer_start, | ||
382 | .stop = wakeup_tracer_stop, | ||
374 | .open = wakeup_tracer_open, | 383 | .open = wakeup_tracer_open, |
375 | .close = wakeup_tracer_close, | 384 | .close = wakeup_tracer_close, |
376 | .ctrl_update = wakeup_tracer_ctrl_update, | ||
377 | .print_max = 1, | 385 | .print_max = 1, |
378 | #ifdef CONFIG_FTRACE_SELFTEST | 386 | #ifdef CONFIG_FTRACE_SELFTEST |
379 | .selftest = trace_selftest_startup_wakeup, | 387 | .selftest = trace_selftest_startup_wakeup, |
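The open-coded need_resched()/preempt_disable_notrace() sequence in wakeup_tracer_call() is folded into ftrace_preempt_disable(), which hands back whether NEED_RESCHED was already set so the matching enable can avoid scheduling from inside the tracer. Reconstructed from the removed code, the pair plausibly looks like:

/* Sketch of ftrace_preempt_disable()/enable(), reconstructed from the
 * open-coded sequence removed above; the real helpers live elsewhere. */
static inline int ftrace_preempt_disable(void)
{
	int resched = need_resched();

	preempt_disable_notrace();
	return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
	/* if NEED_RESCHED was set before we entered, don't reschedule here */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}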
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 90bc752a7580..88c8eb70f54a 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -13,6 +13,7 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
13 | case TRACE_STACK: | 13 | case TRACE_STACK: |
14 | case TRACE_PRINT: | 14 | case TRACE_PRINT: |
15 | case TRACE_SPECIAL: | 15 | case TRACE_SPECIAL: |
16 | case TRACE_BRANCH: | ||
16 | return 1; | 17 | return 1; |
17 | } | 18 | } |
18 | return 0; | 19 | return 0; |
@@ -51,7 +52,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
51 | int cpu, ret = 0; | 52 | int cpu, ret = 0; |
52 | 53 | ||
53 | /* Don't allow flipping of max traces now */ | 54 | /* Don't allow flipping of max traces now */ |
54 | raw_local_irq_save(flags); | 55 | local_irq_save(flags); |
55 | __raw_spin_lock(&ftrace_max_lock); | 56 | __raw_spin_lock(&ftrace_max_lock); |
56 | 57 | ||
57 | cnt = ring_buffer_entries(tr->buffer); | 58 | cnt = ring_buffer_entries(tr->buffer); |
@@ -62,7 +63,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
62 | break; | 63 | break; |
63 | } | 64 | } |
64 | __raw_spin_unlock(&ftrace_max_lock); | 65 | __raw_spin_unlock(&ftrace_max_lock); |
65 | raw_local_irq_restore(flags); | 66 | local_irq_restore(flags); |
66 | 67 | ||
67 | if (count) | 68 | if (count) |
68 | *count = cnt; | 69 | *count = cnt; |
@@ -70,6 +71,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count) | |||
70 | return ret; | 71 | return ret; |
71 | } | 72 | } |
72 | 73 | ||
74 | static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) | ||
75 | { | ||
76 | printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n", | ||
77 | trace->name, init_ret); | ||
78 | } | ||
73 | #ifdef CONFIG_FUNCTION_TRACER | 79 | #ifdef CONFIG_FUNCTION_TRACER |
74 | 80 | ||
75 | #ifdef CONFIG_DYNAMIC_FTRACE | 81 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -110,8 +116,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
110 | ftrace_set_filter(func_name, strlen(func_name), 1); | 116 | ftrace_set_filter(func_name, strlen(func_name), 1); |
111 | 117 | ||
112 | /* enable tracing */ | 118 | /* enable tracing */ |
113 | tr->ctrl = 1; | 119 | ret = trace->init(tr); |
114 | trace->init(tr); | 120 | if (ret) { |
121 | warn_failed_init_tracer(trace, ret); | ||
122 | goto out; | ||
123 | } | ||
115 | 124 | ||
116 | /* Sleep for a 1/10 of a second */ | 125 | /* Sleep for a 1/10 of a second */ |
117 | msleep(100); | 126 | msleep(100); |
@@ -134,13 +143,13 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
134 | msleep(100); | 143 | msleep(100); |
135 | 144 | ||
136 | /* stop the tracing. */ | 145 | /* stop the tracing. */ |
137 | tr->ctrl = 0; | 146 | tracing_stop(); |
138 | trace->ctrl_update(tr); | ||
139 | ftrace_enabled = 0; | 147 | ftrace_enabled = 0; |
140 | 148 | ||
141 | /* check the trace buffer */ | 149 | /* check the trace buffer */ |
142 | ret = trace_test_buffer(tr, &count); | 150 | ret = trace_test_buffer(tr, &count); |
143 | trace->reset(tr); | 151 | trace->reset(tr); |
152 | tracing_start(); | ||
144 | 153 | ||
145 | /* we should only have one item */ | 154 | /* we should only have one item */ |
146 | if (!ret && count != 1) { | 155 | if (!ret && count != 1) { |
@@ -148,6 +157,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
148 | ret = -1; | 157 | ret = -1; |
149 | goto out; | 158 | goto out; |
150 | } | 159 | } |
160 | |||
151 | out: | 161 | out: |
152 | ftrace_enabled = save_ftrace_enabled; | 162 | ftrace_enabled = save_ftrace_enabled; |
153 | tracer_enabled = save_tracer_enabled; | 163 | tracer_enabled = save_tracer_enabled; |
@@ -180,18 +190,22 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
180 | ftrace_enabled = 1; | 190 | ftrace_enabled = 1; |
181 | tracer_enabled = 1; | 191 | tracer_enabled = 1; |
182 | 192 | ||
183 | tr->ctrl = 1; | 193 | ret = trace->init(tr); |
184 | trace->init(tr); | 194 | if (ret) { |
195 | warn_failed_init_tracer(trace, ret); | ||
196 | goto out; | ||
197 | } | ||
198 | |||
185 | /* Sleep for a 1/10 of a second */ | 199 | /* Sleep for a 1/10 of a second */ |
186 | msleep(100); | 200 | msleep(100); |
187 | /* stop the tracing. */ | 201 | /* stop the tracing. */ |
188 | tr->ctrl = 0; | 202 | tracing_stop(); |
189 | trace->ctrl_update(tr); | ||
190 | ftrace_enabled = 0; | 203 | ftrace_enabled = 0; |
191 | 204 | ||
192 | /* check the trace buffer */ | 205 | /* check the trace buffer */ |
193 | ret = trace_test_buffer(tr, &count); | 206 | ret = trace_test_buffer(tr, &count); |
194 | trace->reset(tr); | 207 | trace->reset(tr); |
208 | tracing_start(); | ||
195 | 209 | ||
196 | if (!ret && !count) { | 210 | if (!ret && !count) { |
197 | printk(KERN_CONT ".. no entries found .."); | 211 | printk(KERN_CONT ".. no entries found .."); |
@@ -223,8 +237,12 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) | |||
223 | int ret; | 237 | int ret; |
224 | 238 | ||
225 | /* start the tracing */ | 239 | /* start the tracing */ |
226 | tr->ctrl = 1; | 240 | ret = trace->init(tr); |
227 | trace->init(tr); | 241 | if (ret) { |
242 | warn_failed_init_tracer(trace, ret); | ||
243 | return ret; | ||
244 | } | ||
245 | |||
228 | /* reset the max latency */ | 246 | /* reset the max latency */ |
229 | tracing_max_latency = 0; | 247 | tracing_max_latency = 0; |
230 | /* disable interrupts for a bit */ | 248 | /* disable interrupts for a bit */ |
@@ -232,13 +250,13 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) | |||
232 | udelay(100); | 250 | udelay(100); |
233 | local_irq_enable(); | 251 | local_irq_enable(); |
234 | /* stop the tracing. */ | 252 | /* stop the tracing. */ |
235 | tr->ctrl = 0; | 253 | tracing_stop(); |
236 | trace->ctrl_update(tr); | ||
237 | /* check both trace buffers */ | 254 | /* check both trace buffers */ |
238 | ret = trace_test_buffer(tr, NULL); | 255 | ret = trace_test_buffer(tr, NULL); |
239 | if (!ret) | 256 | if (!ret) |
240 | ret = trace_test_buffer(&max_tr, &count); | 257 | ret = trace_test_buffer(&max_tr, &count); |
241 | trace->reset(tr); | 258 | trace->reset(tr); |
259 | tracing_start(); | ||
242 | 260 | ||
243 | if (!ret && !count) { | 261 | if (!ret && !count) { |
244 | printk(KERN_CONT ".. no entries found .."); | 262 | printk(KERN_CONT ".. no entries found .."); |
@@ -259,9 +277,26 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) | |||
259 | unsigned long count; | 277 | unsigned long count; |
260 | int ret; | 278 | int ret; |
261 | 279 | ||
280 | /* | ||
281 | * Now that the big kernel lock is no longer preemptible, | ||
282 | * and this is called with the BKL held, it will always | ||
283 | * fail. If preemption is already disabled, simply | ||
284 | * pass the test. When the BKL is removed, or becomes | ||
285 | * preemptible again, we will once again test this, | ||
286 | * so keep it in. | ||
287 | */ | ||
288 | if (preempt_count()) { | ||
289 | printk(KERN_CONT "can not test ... force "); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
262 | /* start the tracing */ | 293 | /* start the tracing */ |
263 | tr->ctrl = 1; | 294 | ret = trace->init(tr); |
264 | trace->init(tr); | 295 | if (ret) { |
296 | warn_failed_init_tracer(trace, ret); | ||
297 | return ret; | ||
298 | } | ||
299 | |||
265 | /* reset the max latency */ | 300 | /* reset the max latency */ |
266 | tracing_max_latency = 0; | 301 | tracing_max_latency = 0; |
267 | /* disable preemption for a bit */ | 302 | /* disable preemption for a bit */ |
@@ -269,13 +304,13 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) | |||
269 | udelay(100); | 304 | udelay(100); |
270 | preempt_enable(); | 305 | preempt_enable(); |
271 | /* stop the tracing. */ | 306 | /* stop the tracing. */ |
272 | tr->ctrl = 0; | 307 | tracing_stop(); |
273 | trace->ctrl_update(tr); | ||
274 | /* check both trace buffers */ | 308 | /* check both trace buffers */ |
275 | ret = trace_test_buffer(tr, NULL); | 309 | ret = trace_test_buffer(tr, NULL); |
276 | if (!ret) | 310 | if (!ret) |
277 | ret = trace_test_buffer(&max_tr, &count); | 311 | ret = trace_test_buffer(&max_tr, &count); |
278 | trace->reset(tr); | 312 | trace->reset(tr); |
313 | tracing_start(); | ||
279 | 314 | ||
280 | if (!ret && !count) { | 315 | if (!ret && !count) { |
281 | printk(KERN_CONT ".. no entries found .."); | 316 | printk(KERN_CONT ".. no entries found .."); |
@@ -296,9 +331,25 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
296 | unsigned long count; | 331 | unsigned long count; |
297 | int ret; | 332 | int ret; |
298 | 333 | ||
334 | /* | ||
335 | * Now that the big kernel lock is no longer preemptible, | ||
336 | * and this is called with the BKL held, it will always | ||
337 | * fail. If preemption is already disabled, simply | ||
338 | * pass the test. When the BKL is removed, or becomes | ||
339 | * preemptible again, we will once again test this, | ||
340 | * so keep it in. | ||
341 | */ | ||
342 | if (preempt_count()) { | ||
343 | printk(KERN_CONT "can not test ... force "); | ||
344 | return 0; | ||
345 | } | ||
346 | |||
299 | /* start the tracing */ | 347 | /* start the tracing */ |
300 | tr->ctrl = 1; | 348 | ret = trace->init(tr); |
301 | trace->init(tr); | 349 | if (ret) { |
350 | warn_failed_init_tracer(trace, ret); | ||
351 | goto out; | ||
352 | } | ||
302 | 353 | ||
303 | /* reset the max latency */ | 354 | /* reset the max latency */ |
304 | tracing_max_latency = 0; | 355 | tracing_max_latency = 0; |
@@ -312,27 +363,30 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
312 | local_irq_enable(); | 363 | local_irq_enable(); |
313 | 364 | ||
314 | /* stop the tracing. */ | 365 | /* stop the tracing. */ |
315 | tr->ctrl = 0; | 366 | tracing_stop(); |
316 | trace->ctrl_update(tr); | ||
317 | /* check both trace buffers */ | 367 | /* check both trace buffers */ |
318 | ret = trace_test_buffer(tr, NULL); | 368 | ret = trace_test_buffer(tr, NULL); |
319 | if (ret) | 369 | if (ret) { |
370 | tracing_start(); | ||
320 | goto out; | 371 | goto out; |
372 | } | ||
321 | 373 | ||
322 | ret = trace_test_buffer(&max_tr, &count); | 374 | ret = trace_test_buffer(&max_tr, &count); |
323 | if (ret) | 375 | if (ret) { |
376 | tracing_start(); | ||
324 | goto out; | 377 | goto out; |
378 | } | ||
325 | 379 | ||
326 | if (!ret && !count) { | 380 | if (!ret && !count) { |
327 | printk(KERN_CONT ".. no entries found .."); | 381 | printk(KERN_CONT ".. no entries found .."); |
328 | ret = -1; | 382 | ret = -1; |
383 | tracing_start(); | ||
329 | goto out; | 384 | goto out; |
330 | } | 385 | } |
331 | 386 | ||
332 | /* do the test by disabling interrupts first this time */ | 387 | /* do the test by disabling interrupts first this time */ |
333 | tracing_max_latency = 0; | 388 | tracing_max_latency = 0; |
334 | tr->ctrl = 1; | 389 | tracing_start(); |
335 | trace->ctrl_update(tr); | ||
336 | preempt_disable(); | 390 | preempt_disable(); |
337 | local_irq_disable(); | 391 | local_irq_disable(); |
338 | udelay(100); | 392 | udelay(100); |
@@ -341,8 +395,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
341 | local_irq_enable(); | 395 | local_irq_enable(); |
342 | 396 | ||
343 | /* stop the tracing. */ | 397 | /* stop the tracing. */ |
344 | tr->ctrl = 0; | 398 | tracing_stop(); |
345 | trace->ctrl_update(tr); | ||
346 | /* check both trace buffers */ | 399 | /* check both trace buffers */ |
347 | ret = trace_test_buffer(tr, NULL); | 400 | ret = trace_test_buffer(tr, NULL); |
348 | if (ret) | 401 | if (ret) |
@@ -358,6 +411,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
358 | 411 | ||
359 | out: | 412 | out: |
360 | trace->reset(tr); | 413 | trace->reset(tr); |
414 | tracing_start(); | ||
361 | tracing_max_latency = save_max; | 415 | tracing_max_latency = save_max; |
362 | 416 | ||
363 | return ret; | 417 | return ret; |
@@ -423,8 +477,12 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
423 | wait_for_completion(&isrt); | 477 | wait_for_completion(&isrt); |
424 | 478 | ||
425 | /* start the tracing */ | 479 | /* start the tracing */ |
426 | tr->ctrl = 1; | 480 | ret = trace->init(tr); |
427 | trace->init(tr); | 481 | if (ret) { |
482 | warn_failed_init_tracer(trace, ret); | ||
483 | return ret; | ||
484 | } | ||
485 | |||
428 | /* reset the max latency */ | 486 | /* reset the max latency */ |
429 | tracing_max_latency = 0; | 487 | tracing_max_latency = 0; |
430 | 488 | ||
@@ -448,8 +506,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
448 | msleep(100); | 506 | msleep(100); |
449 | 507 | ||
450 | /* stop the tracing. */ | 508 | /* stop the tracing. */ |
451 | tr->ctrl = 0; | 509 | tracing_stop(); |
452 | trace->ctrl_update(tr); | ||
453 | /* check both trace buffers */ | 510 | /* check both trace buffers */ |
454 | ret = trace_test_buffer(tr, NULL); | 511 | ret = trace_test_buffer(tr, NULL); |
455 | if (!ret) | 512 | if (!ret) |
@@ -457,6 +514,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
457 | 514 | ||
458 | 515 | ||
459 | trace->reset(tr); | 516 | trace->reset(tr); |
517 | tracing_start(); | ||
460 | 518 | ||
461 | tracing_max_latency = save_max; | 519 | tracing_max_latency = save_max; |
462 | 520 | ||
@@ -480,16 +538,20 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr | |||
480 | int ret; | 538 | int ret; |
481 | 539 | ||
482 | /* start the tracing */ | 540 | /* start the tracing */ |
483 | tr->ctrl = 1; | 541 | ret = trace->init(tr); |
484 | trace->init(tr); | 542 | if (ret) { |
543 | warn_failed_init_tracer(trace, ret); | ||
544 | return ret; | ||
545 | } | ||
546 | |||
485 | /* Sleep for a 1/10 of a second */ | 547 | /* Sleep for a 1/10 of a second */ |
486 | msleep(100); | 548 | msleep(100); |
487 | /* stop the tracing. */ | 549 | /* stop the tracing. */ |
488 | tr->ctrl = 0; | 550 | tracing_stop(); |
489 | trace->ctrl_update(tr); | ||
490 | /* check the trace buffer */ | 551 | /* check the trace buffer */ |
491 | ret = trace_test_buffer(tr, &count); | 552 | ret = trace_test_buffer(tr, &count); |
492 | trace->reset(tr); | 553 | trace->reset(tr); |
554 | tracing_start(); | ||
493 | 555 | ||
494 | if (!ret && !count) { | 556 | if (!ret && !count) { |
495 | printk(KERN_CONT ".. no entries found .."); | 557 | printk(KERN_CONT ".. no entries found .."); |
@@ -508,17 +570,48 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | |||
508 | int ret; | 570 | int ret; |
509 | 571 | ||
510 | /* start the tracing */ | 572 | /* start the tracing */ |
511 | tr->ctrl = 1; | 573 | ret = trace->init(tr); |
512 | trace->init(tr); | 574 | if (ret) { |
575 | warn_failed_init_tracer(trace, ret); | ||
576 | return 0; | ||
577 | } | ||
578 | |||
513 | /* Sleep for a 1/10 of a second */ | 579 | /* Sleep for a 1/10 of a second */ |
514 | msleep(100); | 580 | msleep(100); |
515 | /* stop the tracing. */ | 581 | /* stop the tracing. */ |
516 | tr->ctrl = 0; | 582 | tracing_stop(); |
517 | trace->ctrl_update(tr); | ||
518 | /* check the trace buffer */ | 583 | /* check the trace buffer */ |
519 | ret = trace_test_buffer(tr, &count); | 584 | ret = trace_test_buffer(tr, &count); |
520 | trace->reset(tr); | 585 | trace->reset(tr); |
586 | tracing_start(); | ||
521 | 587 | ||
522 | return ret; | 588 | return ret; |
523 | } | 589 | } |
524 | #endif /* CONFIG_SYSPROF_TRACER */ | 590 | #endif /* CONFIG_SYSPROF_TRACER */ |
591 | |||
592 | #ifdef CONFIG_BRANCH_TRACER | ||
593 | int | ||
594 | trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | ||
595 | { | ||
596 | unsigned long count; | ||
597 | int ret; | ||
598 | |||
599 | /* start the tracing */ | ||
600 | ret = trace->init(tr); | ||
601 | if (ret) { | ||
602 | warn_failed_init_tracer(trace, ret); | ||
603 | return ret; | ||
604 | } | ||
605 | |||
606 | /* Sleep for a 1/10 of a second */ | ||
607 | msleep(100); | ||
608 | /* stop the tracing. */ | ||
609 | tracing_stop(); | ||
610 | /* check the trace buffer */ | ||
611 | ret = trace_test_buffer(tr, &count); | ||
612 | trace->reset(tr); | ||
613 | tracing_start(); | ||
614 | |||
615 | return ret; | ||
616 | } | ||
617 | #endif /* CONFIG_BRANCH_TRACER */ | ||
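Every selftest hunk above applies the same two-part conversion. First, trace->init() now returns an int, so each selftest checks it and reports failures through warn_failed_init_tracer(). Second, the old tr->ctrl flag plus trace->ctrl_update(tr) callback is replaced by the global tracing_stop()/tracing_start() pair, and tracing_start() must also run on every early-exit path so tracing is left enabled for the next test. A minimal sketch of the resulting selftest shape, using only helpers that appear in the hunks above (trace_selftest_startup_example is a made-up name; it mirrors the CONFIG_BRANCH_TRACER test):

	int
	trace_selftest_startup_example(struct tracer *trace, struct trace_array *tr)
	{
		unsigned long count;
		int ret;

		ret = trace->init(tr);		/* init can now fail */
		if (ret) {
			warn_failed_init_tracer(trace, ret);
			return ret;
		}

		msleep(100);			/* let the tracer record entries */

		tracing_stop();			/* quiesce writers before reading */
		ret = trace_test_buffer(tr, &count);
		trace->reset(tr);
		tracing_start();		/* always leave tracing running */

		if (!ret && !count) {
			printk(KERN_CONT ".. no entries found ..");
			ret = -1;
		}
		return ret;
	}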
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 3bdb44bde4b7..d0871bc0aca5 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/debugfs.h> | 10 | #include <linux/debugfs.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/sysctl.h> | ||
13 | #include <linux/init.h> | 14 | #include <linux/init.h> |
14 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
15 | #include "trace.h" | 16 | #include "trace.h" |
@@ -31,6 +32,10 @@ static raw_spinlock_t max_stack_lock = | |||
31 | 32 | ||
32 | static int stack_trace_disabled __read_mostly; | 33 | static int stack_trace_disabled __read_mostly; |
33 | static DEFINE_PER_CPU(int, trace_active); | 34 | static DEFINE_PER_CPU(int, trace_active); |
35 | static DEFINE_MUTEX(stack_sysctl_mutex); | ||
36 | |||
37 | int stack_tracer_enabled; | ||
38 | static int last_stack_tracer_enabled; | ||
34 | 39 | ||
35 | static inline void check_stack(void) | 40 | static inline void check_stack(void) |
36 | { | 41 | { |
@@ -48,7 +53,7 @@ static inline void check_stack(void) | |||
48 | if (!object_is_on_stack(&this_size)) | 53 | if (!object_is_on_stack(&this_size)) |
49 | return; | 54 | return; |
50 | 55 | ||
51 | raw_local_irq_save(flags); | 56 | local_irq_save(flags); |
52 | __raw_spin_lock(&max_stack_lock); | 57 | __raw_spin_lock(&max_stack_lock); |
53 | 58 | ||
54 | /* a race could have already updated it */ | 59 | /* a race could have already updated it */ |
@@ -78,6 +83,7 @@ static inline void check_stack(void) | |||
78 | * on a new max, so it is far from a fast path. | 83 | * on a new max, so it is far from a fast path. |
79 | */ | 84 | */ |
80 | while (i < max_stack_trace.nr_entries) { | 85 | while (i < max_stack_trace.nr_entries) { |
86 | int found = 0; | ||
81 | 87 | ||
82 | stack_dump_index[i] = this_size; | 88 | stack_dump_index[i] = this_size; |
83 | p = start; | 89 | p = start; |
@@ -86,17 +92,19 @@ static inline void check_stack(void) | |||
86 | if (*p == stack_dump_trace[i]) { | 92 | if (*p == stack_dump_trace[i]) { |
87 | this_size = stack_dump_index[i++] = | 93 | this_size = stack_dump_index[i++] = |
88 | (top - p) * sizeof(unsigned long); | 94 | (top - p) * sizeof(unsigned long); |
95 | found = 1; | ||
89 | /* Start the search from here */ | 96 | /* Start the search from here */ |
90 | start = p + 1; | 97 | start = p + 1; |
91 | } | 98 | } |
92 | } | 99 | } |
93 | 100 | ||
94 | i++; | 101 | if (!found) |
102 | i++; | ||
95 | } | 103 | } |
96 | 104 | ||
97 | out: | 105 | out: |
98 | __raw_spin_unlock(&max_stack_lock); | 106 | __raw_spin_unlock(&max_stack_lock); |
99 | raw_local_irq_restore(flags); | 107 | local_irq_restore(flags); |
100 | } | 108 | } |
101 | 109 | ||
102 | static void | 110 | static void |
@@ -107,8 +115,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
107 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) | 115 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) |
108 | return; | 116 | return; |
109 | 117 | ||
110 | resched = need_resched(); | 118 | resched = ftrace_preempt_disable(); |
111 | preempt_disable_notrace(); | ||
112 | 119 | ||
113 | cpu = raw_smp_processor_id(); | 120 | cpu = raw_smp_processor_id(); |
114 | /* no atomic needed, we only modify this variable by this cpu */ | 121 | /* no atomic needed, we only modify this variable by this cpu */ |
@@ -120,10 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
120 | out: | 127 | out: |
121 | per_cpu(trace_active, cpu)--; | 128 | per_cpu(trace_active, cpu)--; |
122 | /* prevent recursion in schedule */ | 129 | /* prevent recursion in schedule */ |
123 | if (resched) | 130 | ftrace_preempt_enable(resched); |
124 | preempt_enable_no_resched_notrace(); | ||
125 | else | ||
126 | preempt_enable_notrace(); | ||
127 | } | 131 | } |
128 | 132 | ||
129 | static struct ftrace_ops trace_ops __read_mostly = | 133 | static struct ftrace_ops trace_ops __read_mostly = |
@@ -166,16 +170,16 @@ stack_max_size_write(struct file *filp, const char __user *ubuf, | |||
166 | if (ret < 0) | 170 | if (ret < 0) |
167 | return ret; | 171 | return ret; |
168 | 172 | ||
169 | raw_local_irq_save(flags); | 173 | local_irq_save(flags); |
170 | __raw_spin_lock(&max_stack_lock); | 174 | __raw_spin_lock(&max_stack_lock); |
171 | *ptr = val; | 175 | *ptr = val; |
172 | __raw_spin_unlock(&max_stack_lock); | 176 | __raw_spin_unlock(&max_stack_lock); |
173 | raw_local_irq_restore(flags); | 177 | local_irq_restore(flags); |
174 | 178 | ||
175 | return count; | 179 | return count; |
176 | } | 180 | } |
177 | 181 | ||
178 | static struct file_operations stack_max_size_fops = { | 182 | static const struct file_operations stack_max_size_fops = { |
179 | .open = tracing_open_generic, | 183 | .open = tracing_open_generic, |
180 | .read = stack_max_size_read, | 184 | .read = stack_max_size_read, |
181 | .write = stack_max_size_write, | 185 | .write = stack_max_size_write, |
@@ -273,7 +277,7 @@ static int t_show(struct seq_file *m, void *v) | |||
273 | return 0; | 277 | return 0; |
274 | } | 278 | } |
275 | 279 | ||
276 | static struct seq_operations stack_trace_seq_ops = { | 280 | static const struct seq_operations stack_trace_seq_ops = { |
277 | .start = t_start, | 281 | .start = t_start, |
278 | .next = t_next, | 282 | .next = t_next, |
279 | .stop = t_stop, | 283 | .stop = t_stop, |
@@ -289,12 +293,47 @@ static int stack_trace_open(struct inode *inode, struct file *file) | |||
289 | return ret; | 293 | return ret; |
290 | } | 294 | } |
291 | 295 | ||
292 | static struct file_operations stack_trace_fops = { | 296 | static const struct file_operations stack_trace_fops = { |
293 | .open = stack_trace_open, | 297 | .open = stack_trace_open, |
294 | .read = seq_read, | 298 | .read = seq_read, |
295 | .llseek = seq_lseek, | 299 | .llseek = seq_lseek, |
296 | }; | 300 | }; |
297 | 301 | ||
302 | int | ||
303 | stack_trace_sysctl(struct ctl_table *table, int write, | ||
304 | struct file *file, void __user *buffer, size_t *lenp, | ||
305 | loff_t *ppos) | ||
306 | { | ||
307 | int ret; | ||
308 | |||
309 | mutex_lock(&stack_sysctl_mutex); | ||
310 | |||
311 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); | ||
312 | |||
313 | if (ret || !write || | ||
314 | (last_stack_tracer_enabled == stack_tracer_enabled)) | ||
315 | goto out; | ||
316 | |||
317 | last_stack_tracer_enabled = stack_tracer_enabled; | ||
318 | |||
319 | if (stack_tracer_enabled) | ||
320 | register_ftrace_function(&trace_ops); | ||
321 | else | ||
322 | unregister_ftrace_function(&trace_ops); | ||
323 | |||
324 | out: | ||
325 | mutex_unlock(&stack_sysctl_mutex); | ||
326 | return ret; | ||
327 | } | ||
328 | |||
329 | static __init int enable_stacktrace(char *str) | ||
330 | { | ||
331 | stack_tracer_enabled = 1; | ||
332 | last_stack_tracer_enabled = 1; | ||
333 | return 1; | ||
334 | } | ||
335 | __setup("stacktrace", enable_stacktrace); | ||
336 | |||
298 | static __init int stack_trace_init(void) | 337 | static __init int stack_trace_init(void) |
299 | { | 338 | { |
300 | struct dentry *d_tracer; | 339 | struct dentry *d_tracer; |
@@ -312,7 +351,8 @@ static __init int stack_trace_init(void) | |||
312 | if (!entry) | 351 | if (!entry) |
313 | pr_warning("Could not create debugfs 'stack_trace' entry\n"); | 352 | pr_warning("Could not create debugfs 'stack_trace' entry\n"); |
314 | 353 | ||
315 | register_ftrace_function(&trace_ops); | 354 | if (stack_tracer_enabled) |
355 | register_ftrace_function(&trace_ops); | ||
316 | 356 | ||
317 | return 0; | 357 | return 0; |
318 | } | 358 | } |
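Two independent changes land in trace_stack.c. The stack tracer is now off by default, enabled either at boot with the "stacktrace" parameter or at runtime through the stack_tracer_enabled sysctl handled by stack_trace_sysctl() under stack_sysctl_mutex; check_stack() also gains a found flag so the index only advances when a saved entry was not matched. Separately, stack_trace_call() swaps its open-coded preemption dance for ftrace_preempt_disable()/ftrace_preempt_enable(). Those helpers simply factor out the sequence deleted above, roughly (a sketch; the real definitions live in the tracing headers):

	static inline int ftrace_preempt_disable(void)
	{
		int resched;

		resched = need_resched();	/* remember if a resched was pending */
		preempt_disable_notrace();
		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		if (resched)
			preempt_enable_no_resched_notrace();	/* avoid recursing into schedule */
		else
			preempt_enable_notrace();
	}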
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index 9587d3bcba55..01becf1f19ff 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -234,20 +234,10 @@ static void stop_stack_timers(void) | |||
234 | stop_stack_timer(cpu); | 234 | stop_stack_timer(cpu); |
235 | } | 235 | } |
236 | 236 | ||
237 | static void stack_reset(struct trace_array *tr) | ||
238 | { | ||
239 | int cpu; | ||
240 | |||
241 | tr->time_start = ftrace_now(tr->cpu); | ||
242 | |||
243 | for_each_online_cpu(cpu) | ||
244 | tracing_reset(tr, cpu); | ||
245 | } | ||
246 | |||
247 | static void start_stack_trace(struct trace_array *tr) | 237 | static void start_stack_trace(struct trace_array *tr) |
248 | { | 238 | { |
249 | mutex_lock(&sample_timer_lock); | 239 | mutex_lock(&sample_timer_lock); |
250 | stack_reset(tr); | 240 | tracing_reset_online_cpus(tr); |
251 | start_stack_timers(); | 241 | start_stack_timers(); |
252 | tracer_enabled = 1; | 242 | tracer_enabled = 1; |
253 | mutex_unlock(&sample_timer_lock); | 243 | mutex_unlock(&sample_timer_lock); |
@@ -261,27 +251,17 @@ static void stop_stack_trace(struct trace_array *tr) | |||
261 | mutex_unlock(&sample_timer_lock); | 251 | mutex_unlock(&sample_timer_lock); |
262 | } | 252 | } |
263 | 253 | ||
264 | static void stack_trace_init(struct trace_array *tr) | 254 | static int stack_trace_init(struct trace_array *tr) |
265 | { | 255 | { |
266 | sysprof_trace = tr; | 256 | sysprof_trace = tr; |
267 | 257 | ||
268 | if (tr->ctrl) | 258 | start_stack_trace(tr); |
269 | start_stack_trace(tr); | 259 | return 0; |
270 | } | 260 | } |
271 | 261 | ||
272 | static void stack_trace_reset(struct trace_array *tr) | 262 | static void stack_trace_reset(struct trace_array *tr) |
273 | { | 263 | { |
274 | if (tr->ctrl) | 264 | stop_stack_trace(tr); |
275 | stop_stack_trace(tr); | ||
276 | } | ||
277 | |||
278 | static void stack_trace_ctrl_update(struct trace_array *tr) | ||
279 | { | ||
280 | /* When starting a new trace, reset the buffers */ | ||
281 | if (tr->ctrl) | ||
282 | start_stack_trace(tr); | ||
283 | else | ||
284 | stop_stack_trace(tr); | ||
285 | } | 265 | } |
286 | 266 | ||
287 | static struct tracer stack_trace __read_mostly = | 267 | static struct tracer stack_trace __read_mostly = |
@@ -289,7 +269,6 @@ static struct tracer stack_trace __read_mostly = | |||
289 | .name = "sysprof", | 269 | .name = "sysprof", |
290 | .init = stack_trace_init, | 270 | .init = stack_trace_init, |
291 | .reset = stack_trace_reset, | 271 | .reset = stack_trace_reset, |
292 | .ctrl_update = stack_trace_ctrl_update, | ||
293 | #ifdef CONFIG_FTRACE_SELFTEST | 272 | #ifdef CONFIG_FTRACE_SELFTEST |
294 | .selftest = trace_selftest_startup_sysprof, | 273 | .selftest = trace_selftest_startup_sysprof, |
295 | #endif | 274 | #endif |
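The sysprof tracer follows the same init-returns-int conversion and drops its private stack_reset() in favour of the shared tracing_reset_online_cpus(). Judging from the deleted body, the shared helper has the same shape (a sketch, assuming the version in kernel/trace/trace.c matches):

	void tracing_reset_online_cpus(struct trace_array *tr)
	{
		int cpu;

		tr->time_start = ftrace_now(tr->cpu);	/* restamp the trace window */

		for_each_online_cpu(cpu)
			tracing_reset(tr, cpu);		/* clear each per-cpu buffer */
	}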
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index af8c85664882..79602740bbb5 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -43,6 +43,7 @@ static DEFINE_MUTEX(tracepoints_mutex); | |||
43 | */ | 43 | */ |
44 | #define TRACEPOINT_HASH_BITS 6 | 44 | #define TRACEPOINT_HASH_BITS 6 |
45 | #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) | 45 | #define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS) |
46 | static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | ||
46 | 47 | ||
47 | /* | 48 | /* |
48 | * Note about RCU : | 49 | * Note about RCU : |
@@ -54,40 +55,43 @@ struct tracepoint_entry { | |||
54 | struct hlist_node hlist; | 55 | struct hlist_node hlist; |
55 | void **funcs; | 56 | void **funcs; |
56 | int refcount; /* Number of times armed. 0 if disarmed. */ | 57 | int refcount; /* Number of times armed. 0 if disarmed. */ |
57 | struct rcu_head rcu; | ||
58 | void *oldptr; | ||
59 | unsigned char rcu_pending:1; | ||
60 | char name[0]; | 58 | char name[0]; |
61 | }; | 59 | }; |
62 | 60 | ||
63 | static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; | 61 | struct tp_probes { |
62 | union { | ||
63 | struct rcu_head rcu; | ||
64 | struct list_head list; | ||
65 | } u; | ||
66 | void *probes[0]; | ||
67 | }; | ||
64 | 68 | ||
65 | static void free_old_closure(struct rcu_head *head) | 69 | static inline void *allocate_probes(int count) |
66 | { | 70 | { |
67 | struct tracepoint_entry *entry = container_of(head, | 71 | struct tp_probes *p = kmalloc(count * sizeof(void *) |
68 | struct tracepoint_entry, rcu); | 72 | + sizeof(struct tp_probes), GFP_KERNEL); |
69 | kfree(entry->oldptr); | 73 | return p == NULL ? NULL : p->probes; |
70 | /* Make sure we free the data before setting the pending flag to 0 */ | ||
71 | smp_wmb(); | ||
72 | entry->rcu_pending = 0; | ||
73 | } | 74 | } |
74 | 75 | ||
75 | static void tracepoint_entry_free_old(struct tracepoint_entry *entry, void *old) | 76 | static void rcu_free_old_probes(struct rcu_head *head) |
76 | { | 77 | { |
77 | if (!old) | 78 | kfree(container_of(head, struct tp_probes, u.rcu)); |
78 | return; | 79 | } |
79 | entry->oldptr = old; | 80 | |
80 | entry->rcu_pending = 1; | 81 | static inline void release_probes(void *old) |
81 | /* write rcu_pending before calling the RCU callback */ | 82 | { |
82 | smp_wmb(); | 83 | if (old) { |
83 | call_rcu_sched(&entry->rcu, free_old_closure); | 84 | struct tp_probes *tp_probes = container_of(old, |
85 | struct tp_probes, probes[0]); | ||
86 | call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes); | ||
87 | } | ||
84 | } | 88 | } |
85 | 89 | ||
86 | static void debug_print_probes(struct tracepoint_entry *entry) | 90 | static void debug_print_probes(struct tracepoint_entry *entry) |
87 | { | 91 | { |
88 | int i; | 92 | int i; |
89 | 93 | ||
90 | if (!tracepoint_debug) | 94 | if (!tracepoint_debug || !entry->funcs) |
91 | return; | 95 | return; |
92 | 96 | ||
93 | for (i = 0; entry->funcs[i]; i++) | 97 | for (i = 0; entry->funcs[i]; i++) |
@@ -111,12 +115,13 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) | |||
111 | return ERR_PTR(-EEXIST); | 115 | return ERR_PTR(-EEXIST); |
112 | } | 116 | } |
113 | /* + 2 : one for new probe, one for NULL func */ | 117 | /* + 2 : one for new probe, one for NULL func */ |
114 | new = kzalloc((nr_probes + 2) * sizeof(void *), GFP_KERNEL); | 118 | new = allocate_probes(nr_probes + 2); |
115 | if (new == NULL) | 119 | if (new == NULL) |
116 | return ERR_PTR(-ENOMEM); | 120 | return ERR_PTR(-ENOMEM); |
117 | if (old) | 121 | if (old) |
118 | memcpy(new, old, nr_probes * sizeof(void *)); | 122 | memcpy(new, old, nr_probes * sizeof(void *)); |
119 | new[nr_probes] = probe; | 123 | new[nr_probes] = probe; |
124 | new[nr_probes + 1] = NULL; | ||
120 | entry->refcount = nr_probes + 1; | 125 | entry->refcount = nr_probes + 1; |
121 | entry->funcs = new; | 126 | entry->funcs = new; |
122 | debug_print_probes(entry); | 127 | debug_print_probes(entry); |
@@ -132,7 +137,7 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) | |||
132 | old = entry->funcs; | 137 | old = entry->funcs; |
133 | 138 | ||
134 | if (!old) | 139 | if (!old) |
135 | return NULL; | 140 | return ERR_PTR(-ENOENT); |
136 | 141 | ||
137 | debug_print_probes(entry); | 142 | debug_print_probes(entry); |
138 | /* (N -> M), (N > 1, M >= 0) probes */ | 143 | /* (N -> M), (N > 1, M >= 0) probes */ |
@@ -151,13 +156,13 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) | |||
151 | int j = 0; | 156 | int j = 0; |
152 | /* N -> M, (N > 1, M > 0) */ | 157 | /* N -> M, (N > 1, M > 0) */ |
153 | /* + 1 for NULL */ | 158 | /* + 1 for NULL */ |
154 | new = kzalloc((nr_probes - nr_del + 1) | 159 | new = allocate_probes(nr_probes - nr_del + 1); |
155 | * sizeof(void *), GFP_KERNEL); | ||
156 | if (new == NULL) | 160 | if (new == NULL) |
157 | return ERR_PTR(-ENOMEM); | 161 | return ERR_PTR(-ENOMEM); |
158 | for (i = 0; old[i]; i++) | 162 | for (i = 0; old[i]; i++) |
159 | if ((probe && old[i] != probe)) | 163 | if ((probe && old[i] != probe)) |
160 | new[j++] = old[i]; | 164 | new[j++] = old[i]; |
165 | new[nr_probes - nr_del] = NULL; | ||
161 | entry->refcount = nr_probes - nr_del; | 166 | entry->refcount = nr_probes - nr_del; |
162 | entry->funcs = new; | 167 | entry->funcs = new; |
163 | } | 168 | } |
@@ -215,7 +220,6 @@ static struct tracepoint_entry *add_tracepoint(const char *name) | |||
215 | memcpy(&e->name[0], name, name_len); | 220 | memcpy(&e->name[0], name, name_len); |
216 | e->funcs = NULL; | 221 | e->funcs = NULL; |
217 | e->refcount = 0; | 222 | e->refcount = 0; |
218 | e->rcu_pending = 0; | ||
219 | hlist_add_head(&e->hlist, head); | 223 | hlist_add_head(&e->hlist, head); |
220 | return e; | 224 | return e; |
221 | } | 225 | } |
@@ -224,32 +228,10 @@ static struct tracepoint_entry *add_tracepoint(const char *name) | |||
224 | * Remove the tracepoint from the tracepoint hash table. Must be called with | 228 | * Remove the tracepoint from the tracepoint hash table. Must be called with |
225 | * mutex_lock held. | 229 | * mutex_lock held. |
226 | */ | 230 | */ |
227 | static int remove_tracepoint(const char *name) | 231 | static inline void remove_tracepoint(struct tracepoint_entry *e) |
228 | { | 232 | { |
229 | struct hlist_head *head; | ||
230 | struct hlist_node *node; | ||
231 | struct tracepoint_entry *e; | ||
232 | int found = 0; | ||
233 | size_t len = strlen(name) + 1; | ||
234 | u32 hash = jhash(name, len-1, 0); | ||
235 | |||
236 | head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)]; | ||
237 | hlist_for_each_entry(e, node, head, hlist) { | ||
238 | if (!strcmp(name, e->name)) { | ||
239 | found = 1; | ||
240 | break; | ||
241 | } | ||
242 | } | ||
243 | if (!found) | ||
244 | return -ENOENT; | ||
245 | if (e->refcount) | ||
246 | return -EBUSY; | ||
247 | hlist_del(&e->hlist); | 233 | hlist_del(&e->hlist); |
248 | /* Make sure the call_rcu_sched has been executed */ | ||
249 | if (e->rcu_pending) | ||
250 | rcu_barrier_sched(); | ||
251 | kfree(e); | 234 | kfree(e); |
252 | return 0; | ||
253 | } | 235 | } |
254 | 236 | ||
255 | /* | 237 | /* |
@@ -280,6 +262,7 @@ static void set_tracepoint(struct tracepoint_entry **entry, | |||
280 | static void disable_tracepoint(struct tracepoint *elem) | 262 | static void disable_tracepoint(struct tracepoint *elem) |
281 | { | 263 | { |
282 | elem->state = 0; | 264 | elem->state = 0; |
265 | rcu_assign_pointer(elem->funcs, NULL); | ||
283 | } | 266 | } |
284 | 267 | ||
285 | /** | 268 | /** |
@@ -320,6 +303,23 @@ static void tracepoint_update_probes(void) | |||
320 | module_update_tracepoints(); | 303 | module_update_tracepoints(); |
321 | } | 304 | } |
322 | 305 | ||
306 | static void *tracepoint_add_probe(const char *name, void *probe) | ||
307 | { | ||
308 | struct tracepoint_entry *entry; | ||
309 | void *old; | ||
310 | |||
311 | entry = get_tracepoint(name); | ||
312 | if (!entry) { | ||
313 | entry = add_tracepoint(name); | ||
314 | if (IS_ERR(entry)) | ||
315 | return entry; | ||
316 | } | ||
317 | old = tracepoint_entry_add_probe(entry, probe); | ||
318 | if (IS_ERR(old) && !entry->refcount) | ||
319 | remove_tracepoint(entry); | ||
320 | return old; | ||
321 | } | ||
322 | |||
323 | /** | 323 | /** |
324 | * tracepoint_probe_register - Connect a probe to a tracepoint | 324 | * tracepoint_probe_register - Connect a probe to a tracepoint |
325 | * @name: tracepoint name | 325 | * @name: tracepoint name |
@@ -330,44 +330,36 @@ static void tracepoint_update_probes(void) | |||
330 | */ | 330 | */ |
331 | int tracepoint_probe_register(const char *name, void *probe) | 331 | int tracepoint_probe_register(const char *name, void *probe) |
332 | { | 332 | { |
333 | struct tracepoint_entry *entry; | ||
334 | int ret = 0; | ||
335 | void *old; | 333 | void *old; |
336 | 334 | ||
337 | mutex_lock(&tracepoints_mutex); | 335 | mutex_lock(&tracepoints_mutex); |
338 | entry = get_tracepoint(name); | 336 | old = tracepoint_add_probe(name, probe); |
339 | if (!entry) { | ||
340 | entry = add_tracepoint(name); | ||
341 | if (IS_ERR(entry)) { | ||
342 | ret = PTR_ERR(entry); | ||
343 | goto end; | ||
344 | } | ||
345 | } | ||
346 | /* | ||
347 | * If we detect that a call_rcu_sched is pending for this tracepoint, | ||
348 | * make sure it's executed now. | ||
349 | */ | ||
350 | if (entry->rcu_pending) | ||
351 | rcu_barrier_sched(); | ||
352 | old = tracepoint_entry_add_probe(entry, probe); | ||
353 | if (IS_ERR(old)) { | ||
354 | ret = PTR_ERR(old); | ||
355 | goto end; | ||
356 | } | ||
357 | mutex_unlock(&tracepoints_mutex); | 337 | mutex_unlock(&tracepoints_mutex); |
338 | if (IS_ERR(old)) | ||
339 | return PTR_ERR(old); | ||
340 | |||
358 | tracepoint_update_probes(); /* may update entry */ | 341 | tracepoint_update_probes(); /* may update entry */ |
359 | mutex_lock(&tracepoints_mutex); | 342 | release_probes(old); |
360 | entry = get_tracepoint(name); | 343 | return 0; |
361 | WARN_ON(!entry); | ||
362 | if (entry->rcu_pending) | ||
363 | rcu_barrier_sched(); | ||
364 | tracepoint_entry_free_old(entry, old); | ||
365 | end: | ||
366 | mutex_unlock(&tracepoints_mutex); | ||
367 | return ret; | ||
368 | } | 344 | } |
369 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); | 345 | EXPORT_SYMBOL_GPL(tracepoint_probe_register); |
370 | 346 | ||
347 | static void *tracepoint_remove_probe(const char *name, void *probe) | ||
348 | { | ||
349 | struct tracepoint_entry *entry; | ||
350 | void *old; | ||
351 | |||
352 | entry = get_tracepoint(name); | ||
353 | if (!entry) | ||
354 | return ERR_PTR(-ENOENT); | ||
355 | old = tracepoint_entry_remove_probe(entry, probe); | ||
356 | if (IS_ERR(old)) | ||
357 | return old; | ||
358 | if (!entry->refcount) | ||
359 | remove_tracepoint(entry); | ||
360 | return old; | ||
361 | } | ||
362 | |||
371 | /** | 363 | /** |
372 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint | 364 | * tracepoint_probe_unregister - Disconnect a probe from a tracepoint |
373 | * @name: tracepoint name | 365 | * @name: tracepoint name |
@@ -380,38 +372,104 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register); | |||
380 | */ | 372 | */ |
381 | int tracepoint_probe_unregister(const char *name, void *probe) | 373 | int tracepoint_probe_unregister(const char *name, void *probe) |
382 | { | 374 | { |
383 | struct tracepoint_entry *entry; | ||
384 | void *old; | 375 | void *old; |
385 | int ret = -ENOENT; | ||
386 | 376 | ||
387 | mutex_lock(&tracepoints_mutex); | 377 | mutex_lock(&tracepoints_mutex); |
388 | entry = get_tracepoint(name); | 378 | old = tracepoint_remove_probe(name, probe); |
389 | if (!entry) | ||
390 | goto end; | ||
391 | if (entry->rcu_pending) | ||
392 | rcu_barrier_sched(); | ||
393 | old = tracepoint_entry_remove_probe(entry, probe); | ||
394 | if (!old) { | ||
395 | printk(KERN_WARNING "Warning: Trying to unregister a probe" | ||
396 | "that doesn't exist\n"); | ||
397 | goto end; | ||
398 | } | ||
399 | mutex_unlock(&tracepoints_mutex); | 379 | mutex_unlock(&tracepoints_mutex); |
380 | if (IS_ERR(old)) | ||
381 | return PTR_ERR(old); | ||
382 | |||
400 | tracepoint_update_probes(); /* may update entry */ | 383 | tracepoint_update_probes(); /* may update entry */ |
384 | release_probes(old); | ||
385 | return 0; | ||
386 | } | ||
387 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | ||
388 | |||
389 | static LIST_HEAD(old_probes); | ||
390 | static int need_update; | ||
391 | |||
392 | static void tracepoint_add_old_probes(void *old) | ||
393 | { | ||
394 | need_update = 1; | ||
395 | if (old) { | ||
396 | struct tp_probes *tp_probes = container_of(old, | ||
397 | struct tp_probes, probes[0]); | ||
398 | list_add(&tp_probes->u.list, &old_probes); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | /** | ||
403 | * tracepoint_probe_register_noupdate - register a probe but not connect | ||
404 | * @name: tracepoint name | ||
405 | * @probe: probe handler | ||
406 | * | ||
407 | * caller must call tracepoint_probe_update_all() | ||
408 | */ | ||
409 | int tracepoint_probe_register_noupdate(const char *name, void *probe) | ||
410 | { | ||
411 | void *old; | ||
412 | |||
401 | mutex_lock(&tracepoints_mutex); | 413 | mutex_lock(&tracepoints_mutex); |
402 | entry = get_tracepoint(name); | 414 | old = tracepoint_add_probe(name, probe); |
403 | if (!entry) | 415 | if (IS_ERR(old)) { |
404 | goto end; | 416 | mutex_unlock(&tracepoints_mutex); |
405 | if (entry->rcu_pending) | 417 | return PTR_ERR(old); |
406 | rcu_barrier_sched(); | 418 | } |
407 | tracepoint_entry_free_old(entry, old); | 419 | tracepoint_add_old_probes(old); |
408 | remove_tracepoint(name); /* Ignore busy error message */ | ||
409 | ret = 0; | ||
410 | end: | ||
411 | mutex_unlock(&tracepoints_mutex); | 420 | mutex_unlock(&tracepoints_mutex); |
412 | return ret; | 421 | return 0; |
413 | } | 422 | } |
414 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | 423 | EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); |
424 | |||
425 | /** | ||
426 | * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect | ||
427 | * @name: tracepoint name | ||
428 | * @probe: probe function pointer | ||
429 | * | ||
430 | * caller must call tracepoint_probe_update_all() | ||
431 | */ | ||
432 | int tracepoint_probe_unregister_noupdate(const char *name, void *probe) | ||
433 | { | ||
434 | void *old; | ||
435 | |||
436 | mutex_lock(&tracepoints_mutex); | ||
437 | old = tracepoint_remove_probe(name, probe); | ||
438 | if (IS_ERR(old)) { | ||
439 | mutex_unlock(&tracepoints_mutex); | ||
440 | return PTR_ERR(old); | ||
441 | } | ||
442 | tracepoint_add_old_probes(old); | ||
443 | mutex_unlock(&tracepoints_mutex); | ||
444 | return 0; | ||
445 | } | ||
446 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate); | ||
447 | |||
448 | /** | ||
449 | * tracepoint_probe_update_all - update tracepoints | ||
450 | */ | ||
451 | void tracepoint_probe_update_all(void) | ||
452 | { | ||
453 | LIST_HEAD(release_probes); | ||
454 | struct tp_probes *pos, *next; | ||
455 | |||
456 | mutex_lock(&tracepoints_mutex); | ||
457 | if (!need_update) { | ||
458 | mutex_unlock(&tracepoints_mutex); | ||
459 | return; | ||
460 | } | ||
461 | if (!list_empty(&old_probes)) | ||
462 | list_replace_init(&old_probes, &release_probes); | ||
463 | need_update = 0; | ||
464 | mutex_unlock(&tracepoints_mutex); | ||
465 | |||
466 | tracepoint_update_probes(); | ||
467 | list_for_each_entry_safe(pos, next, &release_probes, u.list) { | ||
468 | list_del(&pos->u.list); | ||
469 | call_rcu_sched(&pos->u.rcu, rcu_free_old_probes); | ||
470 | } | ||
471 | } | ||
472 | EXPORT_SYMBOL_GPL(tracepoint_probe_update_all); | ||
415 | 473 | ||
416 | /** | 474 | /** |
417 | * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. | 475 | * tracepoint_get_iter_range - Get a next tracepoint iterator given a range. |
@@ -483,3 +541,36 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter) | |||
483 | iter->tracepoint = NULL; | 541 | iter->tracepoint = NULL; |
484 | } | 542 | } |
485 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); | 543 | EXPORT_SYMBOL_GPL(tracepoint_iter_reset); |
544 | |||
545 | #ifdef CONFIG_MODULES | ||
546 | |||
547 | int tracepoint_module_notify(struct notifier_block *self, | ||
548 | unsigned long val, void *data) | ||
549 | { | ||
550 | struct module *mod = data; | ||
551 | |||
552 | switch (val) { | ||
553 | case MODULE_STATE_COMING: | ||
554 | tracepoint_update_probe_range(mod->tracepoints, | ||
555 | mod->tracepoints + mod->num_tracepoints); | ||
556 | break; | ||
557 | case MODULE_STATE_GOING: | ||
558 | tracepoint_update_probe_range(mod->tracepoints, | ||
559 | mod->tracepoints + mod->num_tracepoints); | ||
560 | break; | ||
561 | } | ||
562 | return 0; | ||
563 | } | ||
564 | |||
565 | struct notifier_block tracepoint_module_nb = { | ||
566 | .notifier_call = tracepoint_module_notify, | ||
567 | .priority = 0, | ||
568 | }; | ||
569 | |||
570 | static int init_tracepoints(void) | ||
571 | { | ||
572 | return register_module_notifier(&tracepoint_module_nb); | ||
573 | } | ||
574 | __initcall(init_tracepoints); | ||
575 | |||
576 | #endif /* CONFIG_MODULES */ | ||
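The tracepoint.c rework replaces the per-entry oldptr/rcu_pending bookkeeping with a tp_probes wrapper: its union lets a retired probe array be handed either to call_rcu_sched() directly (normal register/unregister) or queued on the old_probes list (batched mode). The new _noupdate variants exist so a caller making many probe changes pays for a single tracepoint_update_probes() pass. A hedged usage sketch, with hypothetical probe functions:

	/* queue several changes without touching the tracepoints yet */
	tracepoint_probe_register_noupdate("sched_switch", my_switch_probe);
	tracepoint_probe_register_noupdate("sched_wakeup", my_wakeup_probe);

	/* one pass connects both and RCU-frees all displaced probe arrays */
	tracepoint_probe_update_all();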
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index 8ebcd8532dfb..2dc06ab35716 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
@@ -27,6 +27,7 @@ | |||
27 | */ | 27 | */ |
28 | void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | 28 | void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) |
29 | { | 29 | { |
30 | const struct cred *tcred; | ||
30 | struct timespec uptime, ts; | 31 | struct timespec uptime, ts; |
31 | u64 ac_etime; | 32 | u64 ac_etime; |
32 | 33 | ||
@@ -53,10 +54,11 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | |||
53 | stats->ac_flag |= AXSIG; | 54 | stats->ac_flag |= AXSIG; |
54 | stats->ac_nice = task_nice(tsk); | 55 | stats->ac_nice = task_nice(tsk); |
55 | stats->ac_sched = tsk->policy; | 56 | stats->ac_sched = tsk->policy; |
56 | stats->ac_uid = tsk->uid; | ||
57 | stats->ac_gid = tsk->gid; | ||
58 | stats->ac_pid = tsk->pid; | 57 | stats->ac_pid = tsk->pid; |
59 | rcu_read_lock(); | 58 | rcu_read_lock(); |
59 | tcred = __task_cred(tsk); | ||
60 | stats->ac_uid = tcred->uid; | ||
61 | stats->ac_gid = tcred->gid; | ||
60 | stats->ac_ppid = pid_alive(tsk) ? | 62 | stats->ac_ppid = pid_alive(tsk) ? |
61 | rcu_dereference(tsk->real_parent)->tgid : 0; | 63 | rcu_dereference(tsk->real_parent)->tgid : 0; |
62 | rcu_read_unlock(); | 64 | rcu_read_unlock(); |
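bacct_add_tsk() now pulls uid/gid through __task_cred() inside the rcu_read_lock() section it already held for real_parent: under the new cred model another task's credentials may be replaced at any time, so they must be sampled under RCU. The pattern, in isolation:

	const struct cred *tcred;
	uid_t uid;
	gid_t gid;

	rcu_read_lock();
	tcred = __task_cred(tsk);	/* RCU-protected view of tsk's creds */
	uid = tcred->uid;
	gid = tcred->gid;
	rcu_read_unlock();		/* tcred must not be used past this point */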
diff --git a/kernel/uid16.c b/kernel/uid16.c index 3e41c1673e2f..2460c3199b5a 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c | |||
@@ -84,11 +84,12 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) | |||
84 | 84 | ||
85 | asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) | 85 | asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) |
86 | { | 86 | { |
87 | const struct cred *cred = current_cred(); | ||
87 | int retval; | 88 | int retval; |
88 | 89 | ||
89 | if (!(retval = put_user(high2lowuid(current->uid), ruid)) && | 90 | if (!(retval = put_user(high2lowuid(cred->uid), ruid)) && |
90 | !(retval = put_user(high2lowuid(current->euid), euid))) | 91 | !(retval = put_user(high2lowuid(cred->euid), euid))) |
91 | retval = put_user(high2lowuid(current->suid), suid); | 92 | retval = put_user(high2lowuid(cred->suid), suid); |
92 | 93 | ||
93 | return retval; | 94 | return retval; |
94 | } | 95 | } |
@@ -104,11 +105,12 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) | |||
104 | 105 | ||
105 | asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) | 106 | asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) |
106 | { | 107 | { |
108 | const struct cred *cred = current_cred(); | ||
107 | int retval; | 109 | int retval; |
108 | 110 | ||
109 | if (!(retval = put_user(high2lowgid(current->gid), rgid)) && | 111 | if (!(retval = put_user(high2lowgid(cred->gid), rgid)) && |
110 | !(retval = put_user(high2lowgid(current->egid), egid))) | 112 | !(retval = put_user(high2lowgid(cred->egid), egid))) |
111 | retval = put_user(high2lowgid(current->sgid), sgid); | 113 | retval = put_user(high2lowgid(cred->sgid), sgid); |
112 | 114 | ||
113 | return retval; | 115 | return retval; |
114 | } | 116 | } |
@@ -161,25 +163,24 @@ static int groups16_from_user(struct group_info *group_info, | |||
161 | 163 | ||
162 | asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) | 164 | asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) |
163 | { | 165 | { |
164 | int i = 0; | 166 | const struct cred *cred = current_cred(); |
167 | int i; | ||
165 | 168 | ||
166 | if (gidsetsize < 0) | 169 | if (gidsetsize < 0) |
167 | return -EINVAL; | 170 | return -EINVAL; |
168 | 171 | ||
169 | get_group_info(current->group_info); | 172 | i = cred->group_info->ngroups; |
170 | i = current->group_info->ngroups; | ||
171 | if (gidsetsize) { | 173 | if (gidsetsize) { |
172 | if (i > gidsetsize) { | 174 | if (i > gidsetsize) { |
173 | i = -EINVAL; | 175 | i = -EINVAL; |
174 | goto out; | 176 | goto out; |
175 | } | 177 | } |
176 | if (groups16_to_user(grouplist, current->group_info)) { | 178 | if (groups16_to_user(grouplist, cred->group_info)) { |
177 | i = -EFAULT; | 179 | i = -EFAULT; |
178 | goto out; | 180 | goto out; |
179 | } | 181 | } |
180 | } | 182 | } |
181 | out: | 183 | out: |
182 | put_group_info(current->group_info); | ||
183 | return i; | 184 | return i; |
184 | } | 185 | } |
185 | 186 | ||
@@ -210,20 +211,20 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) | |||
210 | 211 | ||
211 | asmlinkage long sys_getuid16(void) | 212 | asmlinkage long sys_getuid16(void) |
212 | { | 213 | { |
213 | return high2lowuid(current->uid); | 214 | return high2lowuid(current_uid()); |
214 | } | 215 | } |
215 | 216 | ||
216 | asmlinkage long sys_geteuid16(void) | 217 | asmlinkage long sys_geteuid16(void) |
217 | { | 218 | { |
218 | return high2lowuid(current->euid); | 219 | return high2lowuid(current_euid()); |
219 | } | 220 | } |
220 | 221 | ||
221 | asmlinkage long sys_getgid16(void) | 222 | asmlinkage long sys_getgid16(void) |
222 | { | 223 | { |
223 | return high2lowgid(current->gid); | 224 | return high2lowgid(current_gid()); |
224 | } | 225 | } |
225 | 226 | ||
226 | asmlinkage long sys_getegid16(void) | 227 | asmlinkage long sys_getegid16(void) |
227 | { | 228 | { |
228 | return high2lowgid(current->egid); | 229 | return high2lowgid(current_egid()); |
229 | } | 230 | } |
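All of the uid16 syscalls move off the removed task_struct fields (current->uid and friends) onto the cred accessors. These are thin wrappers over the task's own cred pointer, approximately (from the cred headers of this series; the exact definitions may differ):

	#define current_cred()	(current->cred)
	#define current_uid()	(current_cred()->uid)
	#define current_euid()	(current_cred()->euid)
	#define current_gid()	(current_cred()->gid)
	#define current_egid()	(current_cred()->egid)

sys_getgroups16() also drops its get_group_info()/put_group_info() pair: a task's own cred, and hence its group_info, cannot change while that task is executing in the kernel, so no extra reference is needed.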
diff --git a/kernel/user.c b/kernel/user.c index 39d6159fae43..477b6660f447 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -16,12 +16,13 @@ | |||
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
18 | #include <linux/user_namespace.h> | 18 | #include <linux/user_namespace.h> |
19 | #include "cred-internals.h" | ||
19 | 20 | ||
20 | struct user_namespace init_user_ns = { | 21 | struct user_namespace init_user_ns = { |
21 | .kref = { | 22 | .kref = { |
22 | .refcount = ATOMIC_INIT(2), | 23 | .refcount = ATOMIC_INIT(1), |
23 | }, | 24 | }, |
24 | .root_user = &root_user, | 25 | .creator = &root_user, |
25 | }; | 26 | }; |
26 | EXPORT_SYMBOL_GPL(init_user_ns); | 27 | EXPORT_SYMBOL_GPL(init_user_ns); |
27 | 28 | ||
@@ -47,12 +48,14 @@ static struct kmem_cache *uid_cachep; | |||
47 | */ | 48 | */ |
48 | static DEFINE_SPINLOCK(uidhash_lock); | 49 | static DEFINE_SPINLOCK(uidhash_lock); |
49 | 50 | ||
51 | /* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */ | ||
50 | struct user_struct root_user = { | 52 | struct user_struct root_user = { |
51 | .__count = ATOMIC_INIT(1), | 53 | .__count = ATOMIC_INIT(2), |
52 | .processes = ATOMIC_INIT(1), | 54 | .processes = ATOMIC_INIT(1), |
53 | .files = ATOMIC_INIT(0), | 55 | .files = ATOMIC_INIT(0), |
54 | .sigpending = ATOMIC_INIT(0), | 56 | .sigpending = ATOMIC_INIT(0), |
55 | .locked_shm = 0, | 57 | .locked_shm = 0, |
58 | .user_ns = &init_user_ns, | ||
56 | #ifdef CONFIG_USER_SCHED | 59 | #ifdef CONFIG_USER_SCHED |
57 | .tg = &init_task_group, | 60 | .tg = &init_task_group, |
58 | #endif | 61 | #endif |
@@ -101,19 +104,15 @@ static int sched_create_user(struct user_struct *up) | |||
101 | if (IS_ERR(up->tg)) | 104 | if (IS_ERR(up->tg)) |
102 | rc = -ENOMEM; | 105 | rc = -ENOMEM; |
103 | 106 | ||
104 | return rc; | 107 | set_tg_uid(up); |
105 | } | ||
106 | 108 | ||
107 | static void sched_switch_user(struct task_struct *p) | 109 | return rc; |
108 | { | ||
109 | sched_move_task(p); | ||
110 | } | 110 | } |
111 | 111 | ||
112 | #else /* CONFIG_USER_SCHED */ | 112 | #else /* CONFIG_USER_SCHED */ |
113 | 113 | ||
114 | static void sched_destroy_user(struct user_struct *up) { } | 114 | static void sched_destroy_user(struct user_struct *up) { } |
115 | static int sched_create_user(struct user_struct *up) { return 0; } | 115 | static int sched_create_user(struct user_struct *up) { return 0; } |
116 | static void sched_switch_user(struct task_struct *p) { } | ||
117 | 116 | ||
118 | #endif /* CONFIG_USER_SCHED */ | 117 | #endif /* CONFIG_USER_SCHED */ |
119 | 118 | ||
@@ -242,13 +241,21 @@ static struct kobj_type uids_ktype = { | |||
242 | .release = uids_release, | 241 | .release = uids_release, |
243 | }; | 242 | }; |
244 | 243 | ||
245 | /* create /sys/kernel/uids/<uid>/cpu_share file for this user */ | 244 | /* |
245 | * Create /sys/kernel/uids/<uid>/cpu_share file for this user | ||
246 | * We do not create this file for users in a user namespace (until | ||
247 | * sysfs tagging is implemented). | ||
248 | * | ||
249 | * See Documentation/scheduler/sched-design-CFS.txt for ramifications. | ||
250 | */ | ||
246 | static int uids_user_create(struct user_struct *up) | 251 | static int uids_user_create(struct user_struct *up) |
247 | { | 252 | { |
248 | struct kobject *kobj = &up->kobj; | 253 | struct kobject *kobj = &up->kobj; |
249 | int error; | 254 | int error; |
250 | 255 | ||
251 | memset(kobj, 0, sizeof(struct kobject)); | 256 | memset(kobj, 0, sizeof(struct kobject)); |
257 | if (up->user_ns != &init_user_ns) | ||
258 | return 0; | ||
252 | kobj->kset = uids_kset; | 259 | kobj->kset = uids_kset; |
253 | error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid); | 260 | error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid); |
254 | if (error) { | 261 | if (error) { |
@@ -284,6 +291,8 @@ static void remove_user_sysfs_dir(struct work_struct *w) | |||
284 | unsigned long flags; | 291 | unsigned long flags; |
285 | int remove_user = 0; | 292 | int remove_user = 0; |
286 | 293 | ||
294 | if (up->user_ns != &init_user_ns) | ||
295 | return; | ||
287 | /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() | 296 | /* Make uid_hash_remove() + sysfs_remove_file() + kobject_del() |
288 | * atomic. | 297 | * atomic. |
289 | */ | 298 | */ |
@@ -319,12 +328,13 @@ done: | |||
319 | * IRQ state (as stored in flags) is restored and uidhash_lock released | 328 | * IRQ state (as stored in flags) is restored and uidhash_lock released |
320 | * upon function exit. | 329 | * upon function exit. |
321 | */ | 330 | */ |
322 | static inline void free_user(struct user_struct *up, unsigned long flags) | 331 | static void free_user(struct user_struct *up, unsigned long flags) |
323 | { | 332 | { |
324 | /* restore back the count */ | 333 | /* restore back the count */ |
325 | atomic_inc(&up->__count); | 334 | atomic_inc(&up->__count); |
326 | spin_unlock_irqrestore(&uidhash_lock, flags); | 335 | spin_unlock_irqrestore(&uidhash_lock, flags); |
327 | 336 | ||
337 | put_user_ns(up->user_ns); | ||
328 | INIT_WORK(&up->work, remove_user_sysfs_dir); | 338 | INIT_WORK(&up->work, remove_user_sysfs_dir); |
329 | schedule_work(&up->work); | 339 | schedule_work(&up->work); |
330 | } | 340 | } |
@@ -340,13 +350,14 @@ static inline void uids_mutex_unlock(void) { } | |||
340 | * IRQ state (as stored in flags) is restored and uidhash_lock released | 350 | * IRQ state (as stored in flags) is restored and uidhash_lock released |
341 | * upon function exit. | 351 | * upon function exit. |
342 | */ | 352 | */ |
343 | static inline void free_user(struct user_struct *up, unsigned long flags) | 353 | static void free_user(struct user_struct *up, unsigned long flags) |
344 | { | 354 | { |
345 | uid_hash_remove(up); | 355 | uid_hash_remove(up); |
346 | spin_unlock_irqrestore(&uidhash_lock, flags); | 356 | spin_unlock_irqrestore(&uidhash_lock, flags); |
347 | sched_destroy_user(up); | 357 | sched_destroy_user(up); |
348 | key_put(up->uid_keyring); | 358 | key_put(up->uid_keyring); |
349 | key_put(up->session_keyring); | 359 | key_put(up->session_keyring); |
360 | put_user_ns(up->user_ns); | ||
350 | kmem_cache_free(uid_cachep, up); | 361 | kmem_cache_free(uid_cachep, up); |
351 | } | 362 | } |
352 | 363 | ||
@@ -362,7 +373,7 @@ struct user_struct *find_user(uid_t uid) | |||
362 | { | 373 | { |
363 | struct user_struct *ret; | 374 | struct user_struct *ret; |
364 | unsigned long flags; | 375 | unsigned long flags; |
365 | struct user_namespace *ns = current->nsproxy->user_ns; | 376 | struct user_namespace *ns = current_user_ns(); |
366 | 377 | ||
367 | spin_lock_irqsave(&uidhash_lock, flags); | 378 | spin_lock_irqsave(&uidhash_lock, flags); |
368 | ret = uid_hash_find(uid, uidhashentry(ns, uid)); | 379 | ret = uid_hash_find(uid, uidhashentry(ns, uid)); |
@@ -409,6 +420,8 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
409 | if (sched_create_user(new) < 0) | 420 | if (sched_create_user(new) < 0) |
410 | goto out_free_user; | 421 | goto out_free_user; |
411 | 422 | ||
423 | new->user_ns = get_user_ns(ns); | ||
424 | |||
412 | if (uids_user_create(new)) | 425 | if (uids_user_create(new)) |
413 | goto out_destoy_sched; | 426 | goto out_destoy_sched; |
414 | 427 | ||
@@ -432,7 +445,6 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
432 | up = new; | 445 | up = new; |
433 | } | 446 | } |
434 | spin_unlock_irq(&uidhash_lock); | 447 | spin_unlock_irq(&uidhash_lock); |
435 | |||
436 | } | 448 | } |
437 | 449 | ||
438 | uids_mutex_unlock(); | 450 | uids_mutex_unlock(); |
@@ -441,6 +453,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid) | |||
441 | 453 | ||
442 | out_destoy_sched: | 454 | out_destoy_sched: |
443 | sched_destroy_user(new); | 455 | sched_destroy_user(new); |
456 | put_user_ns(new->user_ns); | ||
444 | out_free_user: | 457 | out_free_user: |
445 | kmem_cache_free(uid_cachep, new); | 458 | kmem_cache_free(uid_cachep, new); |
446 | out_unlock: | 459 | out_unlock: |
@@ -448,63 +461,6 @@ out_unlock: | |||
448 | return NULL; | 461 | return NULL; |
449 | } | 462 | } |
450 | 463 | ||
451 | void switch_uid(struct user_struct *new_user) | ||
452 | { | ||
453 | struct user_struct *old_user; | ||
454 | |||
455 | /* What if a process setreuid()'s and this brings the | ||
456 | * new uid over his NPROC rlimit? We can check this now | ||
457 | * cheaply with the new uid cache, so if it matters | ||
458 | * we should be checking for it. -DaveM | ||
459 | */ | ||
460 | old_user = current->user; | ||
461 | atomic_inc(&new_user->processes); | ||
462 | atomic_dec(&old_user->processes); | ||
463 | switch_uid_keyring(new_user); | ||
464 | current->user = new_user; | ||
465 | sched_switch_user(current); | ||
466 | |||
467 | /* | ||
468 | * We need to synchronize with __sigqueue_alloc() | ||
469 | * doing a get_uid(p->user).. If that saw the old | ||
470 | * user value, we need to wait until it has exited | ||
471 | * its critical region before we can free the old | ||
472 | * structure. | ||
473 | */ | ||
474 | smp_mb(); | ||
475 | spin_unlock_wait(¤t->sighand->siglock); | ||
476 | |||
477 | free_uid(old_user); | ||
478 | suid_keys(current); | ||
479 | } | ||
480 | |||
481 | #ifdef CONFIG_USER_NS | ||
482 | void release_uids(struct user_namespace *ns) | ||
483 | { | ||
484 | int i; | ||
485 | unsigned long flags; | ||
486 | struct hlist_head *head; | ||
487 | struct hlist_node *nd; | ||
488 | |||
489 | spin_lock_irqsave(&uidhash_lock, flags); | ||
490 | /* | ||
491 | * collapse the chains so that the user_struct-s will | ||
492 | * be still alive, but not in hashes. subsequent free_uid() | ||
493 | * will free them. | ||
494 | */ | ||
495 | for (i = 0; i < UIDHASH_SZ; i++) { | ||
496 | head = ns->uidhash_table + i; | ||
497 | while (!hlist_empty(head)) { | ||
498 | nd = head->first; | ||
499 | hlist_del_init(nd); | ||
500 | } | ||
501 | } | ||
502 | spin_unlock_irqrestore(&uidhash_lock, flags); | ||
503 | |||
504 | free_uid(ns->root_user); | ||
505 | } | ||
506 | #endif | ||
507 | |||
508 | static int __init uid_cache_init(void) | 464 | static int __init uid_cache_init(void) |
509 | { | 465 | { |
510 | int n; | 466 | int n; |
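With switch_uid() and release_uids() gone, uid switching is handled by the new credentials code in kernel/cred.c (hence the "cred-internals.h" include above), and a user namespace no longer keeps its user_structs alive through the hash; instead each user_struct pins its namespace directly. The new lifetime rule is just the pairing visible in the hunks:

	/* alloc_uid(): every user_struct holds a reference on its namespace */
	new->user_ns = get_user_ns(ns);

	/* both free_user() variants drop it with the last user reference */
	put_user_ns(up->user_ns);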
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 532858fa5b88..79084311ee57 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -9,60 +9,55 @@ | |||
9 | #include <linux/nsproxy.h> | 9 | #include <linux/nsproxy.h> |
10 | #include <linux/slab.h> | 10 | #include <linux/slab.h> |
11 | #include <linux/user_namespace.h> | 11 | #include <linux/user_namespace.h> |
12 | #include <linux/cred.h> | ||
12 | 13 | ||
13 | /* | 14 | /* |
14 | * Clone a new ns copying an original user ns, setting refcount to 1 | 15 | * Create a new user namespace, deriving the creator from the user in the |
15 | * @old_ns: namespace to clone | 16 | * passed credentials, and replacing that user with the new root user for the |
16 | * Return NULL on error (failure to kmalloc), new ns otherwise | 17 | * new namespace. |
18 | * | ||
19 | * This is called by copy_creds(), which will finish setting the target task's | ||
20 | * credentials. | ||
17 | */ | 21 | */ |
18 | static struct user_namespace *clone_user_ns(struct user_namespace *old_ns) | 22 | int create_user_ns(struct cred *new) |
19 | { | 23 | { |
20 | struct user_namespace *ns; | 24 | struct user_namespace *ns; |
21 | struct user_struct *new_user; | 25 | struct user_struct *root_user; |
22 | int n; | 26 | int n; |
23 | 27 | ||
24 | ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); | 28 | ns = kmalloc(sizeof(struct user_namespace), GFP_KERNEL); |
25 | if (!ns) | 29 | if (!ns) |
26 | return ERR_PTR(-ENOMEM); | 30 | return -ENOMEM; |
27 | 31 | ||
28 | kref_init(&ns->kref); | 32 | kref_init(&ns->kref); |
29 | 33 | ||
30 | for (n = 0; n < UIDHASH_SZ; ++n) | 34 | for (n = 0; n < UIDHASH_SZ; ++n) |
31 | INIT_HLIST_HEAD(ns->uidhash_table + n); | 35 | INIT_HLIST_HEAD(ns->uidhash_table + n); |
32 | 36 | ||
33 | /* Insert new root user. */ | 37 | /* Alloc new root user. */ |
34 | ns->root_user = alloc_uid(ns, 0); | 38 | root_user = alloc_uid(ns, 0); |
35 | if (!ns->root_user) { | 39 | if (!root_user) { |
36 | kfree(ns); | 40 | kfree(ns); |
37 | return ERR_PTR(-ENOMEM); | 41 | return -ENOMEM; |
38 | } | 42 | } |
39 | 43 | ||
40 | /* Reset current->user with a new one */ | 44 | /* set the new root user in the credentials under preparation */ |
41 | new_user = alloc_uid(ns, current->uid); | 45 | ns->creator = new->user; |
42 | if (!new_user) { | 46 | new->user = root_user; |
43 | free_uid(ns->root_user); | 47 | new->uid = new->euid = new->suid = new->fsuid = 0; |
44 | kfree(ns); | 48 | new->gid = new->egid = new->sgid = new->fsgid = 0; |
45 | return ERR_PTR(-ENOMEM); | 49 | put_group_info(new->group_info); |
46 | } | 50 | new->group_info = get_group_info(&init_groups); |
47 | 51 | #ifdef CONFIG_KEYS | |
48 | switch_uid(new_user); | 52 | key_put(new->request_key_auth); |
49 | return ns; | 53 | new->request_key_auth = NULL; |
50 | } | 54 | #endif |
51 | 55 | /* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */ | |
52 | struct user_namespace * copy_user_ns(int flags, struct user_namespace *old_ns) | ||
53 | { | ||
54 | struct user_namespace *new_ns; | ||
55 | |||
56 | BUG_ON(!old_ns); | ||
57 | get_user_ns(old_ns); | ||
58 | |||
59 | if (!(flags & CLONE_NEWUSER)) | ||
60 | return old_ns; | ||
61 | 56 | ||
62 | new_ns = clone_user_ns(old_ns); | 57 | /* alloc_uid() incremented the userns refcount. Just set it to 1 */ |
58 | kref_set(&ns->kref, 1); | ||
63 | 59 | ||
64 | put_user_ns(old_ns); | 60 | return 0; |
65 | return new_ns; | ||
66 | } | 61 | } |
67 | 62 | ||
68 | void free_user_ns(struct kref *kref) | 63 | void free_user_ns(struct kref *kref) |
@@ -70,7 +65,7 @@ void free_user_ns(struct kref *kref) | |||
70 | struct user_namespace *ns; | 65 | struct user_namespace *ns; |
71 | 66 | ||
72 | ns = container_of(kref, struct user_namespace, kref); | 67 | ns = container_of(kref, struct user_namespace, kref); |
73 | release_uids(ns); | 68 | free_uid(ns->creator); |
74 | kfree(ns); | 69 | kfree(ns); |
75 | } | 70 | } |
76 | EXPORT_SYMBOL(free_user_ns); | 71 | EXPORT_SYMBOL(free_user_ns); |
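create_user_ns() inverts the old clone_user_ns()/copy_user_ns() flow: rather than allocating a namespace and then re-pointing current at a new user, it edits a cred struct that is still being prepared, recording the creating user in ns->creator and installing the namespace's root user (uid 0, init_groups, no request_key authorisation) as new->user. A sketch of the expected call site, assuming copy_creds() handles CLONE_NEWUSER along these lines:

	/* inside copy_creds(), after 'new' has been allocated and copied */
	if (clone_flags & CLONE_NEWUSER) {
		ret = create_user_ns(new);	/* swaps new->user for the ns root */
		if (ret < 0)
			goto error_put;		/* hypothetical error label */
	}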
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index d4dc69ddebd7..4952322cba45 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -84,21 +84,21 @@ static cpumask_t cpu_singlethread_map __read_mostly; | |||
84 | static cpumask_t cpu_populated_map __read_mostly; | 84 | static cpumask_t cpu_populated_map __read_mostly; |
85 | 85 | ||
86 | /* If it's single threaded, it isn't in the list of workqueues. */ | 86 | /* If it's single threaded, it isn't in the list of workqueues. */ |
87 | static inline int is_single_threaded(struct workqueue_struct *wq) | 87 | static inline int is_wq_single_threaded(struct workqueue_struct *wq) |
88 | { | 88 | { |
89 | return wq->singlethread; | 89 | return wq->singlethread; |
90 | } | 90 | } |
91 | 91 | ||
92 | static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) | 92 | static const cpumask_t *wq_cpu_map(struct workqueue_struct *wq) |
93 | { | 93 | { |
94 | return is_single_threaded(wq) | 94 | return is_wq_single_threaded(wq) |
95 | ? &cpu_singlethread_map : &cpu_populated_map; | 95 | ? &cpu_singlethread_map : &cpu_populated_map; |
96 | } | 96 | } |
97 | 97 | ||
98 | static | 98 | static |
99 | struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) | 99 | struct cpu_workqueue_struct *wq_per_cpu(struct workqueue_struct *wq, int cpu) |
100 | { | 100 | { |
101 | if (unlikely(is_single_threaded(wq))) | 101 | if (unlikely(is_wq_single_threaded(wq))) |
102 | cpu = singlethread_cpu; | 102 | cpu = singlethread_cpu; |
103 | return per_cpu_ptr(wq->cpu_wq, cpu); | 103 | return per_cpu_ptr(wq->cpu_wq, cpu); |
104 | } | 104 | } |
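The workqueue change is a pure rename: the cred series introduces a task-oriented is_single_threaded(struct task_struct *) predicate elsewhere in the tree, so the workqueue-local helper becomes is_wq_single_threaded() to avoid the name collision. After the merge the two names mean, roughly:

	/* new in the cred series (assumed signature): one thread in the group? */
	bool is_single_threaded(struct task_struct *tsk);

	/* kernel/workqueue.c: was this workqueue created single-threaded? */
	static inline int is_wq_single_threaded(struct workqueue_struct *wq)
	{
		return wq->singlethread;
	}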
@@ -769,7 +769,7 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | |||
769 | { | 769 | { |
770 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | 770 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; |
771 | struct workqueue_struct *wq = cwq->wq; | 771 | struct workqueue_struct *wq = cwq->wq; |
772 | const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d"; | 772 | const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d"; |
773 | struct task_struct *p; | 773 | struct task_struct *p; |
774 | 774 | ||
775 | p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); | 775 | p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu); |