Diffstat (limited to 'kernel')

 kernel/audit.c                  |   2
 kernel/auditsc.c                |  49
 kernel/bpf/core.c               |   2
 kernel/debug/debug_core.c       |  52
 kernel/debug/kdb/kdb_bp.c       |  37
 kernel/debug/kdb/kdb_debugger.c |   4
 kernel/debug/kdb/kdb_main.c     | 269
 kernel/debug/kdb/kdb_private.h  |   3
 kernel/events/core.c            |  19
 kernel/exit.c                   |  12
 kernel/kprobes.c                |   2
 kernel/locking/mutex-debug.c    |   2
 kernel/module.c                 |  91
 kernel/params.c                 |   3
 kernel/range.c                  |  10
 kernel/sched/core.c             |  13
 kernel/sched/deadline.c         |  25
 kernel/sched/fair.c             |   6
 kernel/sys.c                    |   4
 kernel/time/ntp.c               |   7
 kernel/time/time.c              |   4
 kernel/trace/ftrace.c           |  53
 kernel/trace/trace.c            |   1
 kernel/trace/trace_events.c     |  69
 kernel/trace/trace_kdb.c        |   4
 kernel/workqueue.c              |  25

 26 files changed, 503 insertions(+), 265 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index 231b7dcb154b..72ab759a0b43 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1100,7 +1100,7 @@ static void audit_receive(struct sk_buff *skb)
 }
 
 /* Run custom bind function on netlink socket group connect or bind requests. */
-static int audit_bind(int group)
+static int audit_bind(struct net *net, int group)
 {
 	if (!capable(CAP_AUDIT_READ))
 		return -EPERM;
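
Note: audit_bind() gains a struct net * argument because the netlink bind callback now passes the network namespace through to its users. For context, a minimal sketch (not part of the patch) of how such a callback is wired up through netlink_kernel_cfg; the audit_cfg name below is hypothetical:

	/* sketch, kernel context (<linux/netlink.h>, <linux/capability.h>):
	 * the .bind hook runs when a process subscribes to a multicast
	 * group on the audit socket */
	static int audit_bind(struct net *net, int group)
	{
		if (!capable(CAP_AUDIT_READ))
			return -EPERM;	/* reading audit multicasts needs CAP_AUDIT_READ */
		return 0;
	}

	static struct netlink_kernel_cfg audit_cfg = {	/* hypothetical name */
		.input	= audit_receive,	/* unicast message handler (see hunk above) */
		.bind	= audit_bind,		/* group-subscribe hook, now namespace-aware */
	};
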
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 37c69ab561da..072566dd0caf 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -72,6 +72,8 @@
 #include <linux/fs_struct.h>
 #include <linux/compat.h>
 #include <linux/ctype.h>
+#include <linux/string.h>
+#include <uapi/linux/limits.h>
 
 #include "audit.h"
 
@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
 	}
 
 	list_for_each_entry_reverse(n, &context->names_list, list) {
-		/* does the name pointer match? */
-		if (!n->name || n->name->name != name->name)
+		if (!n->name || strcmp(n->name->name, name->name))
 			continue;
 
 		/* match the correct record type */
@@ -1881,14 +1882,44 @@ out_alloc:
 	n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
 	if (!n)
 		return;
-	if (name)
-		/* since name is not NULL we know there is already a matching
-		 * name record, see audit_getname(), so there must be a type
-		 * mismatch; reuse the string path since the original name
-		 * record will keep the string valid until we free it in
-		 * audit_free_names() */
-		n->name = name;
+	/* unfortunately, while we may have a path name to record with the
+	 * inode, we can't always rely on the string lasting until the end of
+	 * the syscall, so we need to create our own copy; it may fail due to
+	 * memory allocation issues, but we do our best */
+	if (name) {
+		/* we can't use getname_kernel() due to size limits */
+		size_t len = strlen(name->name) + 1;
+		struct filename *new = __getname();
+
+		if (unlikely(!new))
+			goto out;
+
+		if (len <= (PATH_MAX - sizeof(*new))) {
+			new->name = (char *)(new) + sizeof(*new);
+			new->separate = false;
+		} else if (len <= PATH_MAX) {
+			/* this looks odd, but is due to final_putname() */
+			struct filename *new2;
 
+			new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
+			if (unlikely(!new2)) {
+				__putname(new);
+				goto out;
+			}
+			new2->name = (char *)new;
+			new2->separate = true;
+			new = new2;
+		} else {
+			/* we should never get here, but let's be safe */
+			__putname(new);
+			goto out;
+		}
+		strlcpy((char *)new->name, name->name, len);
+		new->uptr = NULL;
+		new->aname = n;
+		n->name = new;
+		n->name_put = true;
+	}
 out:
 	if (parent) {
 		n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
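
Note: two things change in __audit_inode(). Name records are now matched by string comparison rather than pointer identity, since the record may hold its own copy of the path, and that copy is built by hand: __getname() returns one PATH_MAX-sized buffer, a short string is stored inline after the struct filename header, while a long string gets the whole buffer plus a separately kmalloc'ed header, which is the layout final_putname() expects. A standalone model of the size test; audit_name_fits_inline() is a hypothetical helper, not a kernel function:

	#include <stdbool.h>
	#include <stddef.h>

	#define PATH_MAX 4096				/* from <uapi/linux/limits.h> */
	struct filename_hdr {				/* stand-in for struct filename */
		const char *name;
		bool separate;				/* header allocated apart from string? */
	};

	/* true iff "len" bytes (strlen + NUL) fit inline after the header
	 * inside the single PATH_MAX buffer that __getname() hands back */
	static inline bool audit_name_fits_inline(size_t len)
	{
		return len <= PATH_MAX - sizeof(struct filename_hdr);
	}
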
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d6594e457a25..a64e7a207d2b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 
 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
-	module_free(NULL, hdr);
+	module_memfree(hdr);
 }
 #endif /* CONFIG_BPF_JIT */
 
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1adf62b39b96..07ce18ca71e0 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -27,6 +27,9 @@
  * version 2. This program is licensed "as is" without any warranty of any
  * kind, whether express or implied.
  */
+
+#define pr_fmt(fmt) "KGDB: " fmt
+
 #include <linux/pid_namespace.h>
 #include <linux/clocksource.h>
 #include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
 		return err;
 	err = kgdb_arch_remove_breakpoint(&tmp);
 	if (err)
-		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
-		       "memory destroyed at: %lx", addr);
+		pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
+		       addr);
 	return err;
 }
 
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
 		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
 		if (error) {
 			ret = error;
-			printk(KERN_INFO "KGDB: BP install failed: %lx",
-			       kgdb_break[i].bpt_addr);
+			pr_info("BP install failed: %lx\n",
+				kgdb_break[i].bpt_addr);
 			continue;
 		}
 
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
 			continue;
 		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
 		if (error) {
-			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
-			       kgdb_break[i].bpt_addr);
+			pr_info("BP remove failed: %lx\n",
+				kgdb_break[i].bpt_addr);
 			ret = error;
 		}
 
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
 			goto setundefined;
 		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
 		if (error)
-			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+			pr_err("breakpoint remove failed: %lx\n",
 			       kgdb_break[i].bpt_addr);
 setundefined:
 		kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
 	if (print_wait) {
 #ifdef CONFIG_KGDB_KDB
 		if (!dbg_kdb_mode)
-			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+			pr_crit("waiting... or $3#33 for KDB\n");
 #else
-		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+		pr_crit("Waiting for remote debugger\n");
 #endif
 	}
 	return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 		exception_level = 0;
 		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
 		dbg_activate_sw_breakpoints();
-		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
-		       addr);
+		pr_crit("re-enter error: breakpoint removed %lx\n", addr);
 		WARN_ON_ONCE(1);
 
 		return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
 		panic("Recursive entry to debugger");
 	}
 
-	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+	pr_crit("re-enter exception: ALL breakpoints killed\n");
 #ifdef CONFIG_KGDB_KDB
 	/* Allow kdb to debug itself one level */
 	return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
 	int cpu;
 	int trace_on = 0;
 	int online_cpus = num_online_cpus();
+	u64 time_left;
 
 	kgdb_info[ks->cpu].enter_kgdb++;
 	kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
 	/*
 	 * Wait for the other CPUs to be notified and be waiting for us:
 	 */
-	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
-			atomic_read(&slaves_in_kgdb)) != online_cpus)
+	time_left = loops_per_jiffy * HZ;
+	while (kgdb_do_roundup && --time_left &&
+	       (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+			online_cpus)
 		cpu_relax();
+	if (!time_left)
+		pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
 
 	/*
 	 * At this point the primary processor is completely
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
 static void sysrq_handle_dbg(int key)
 {
 	if (!dbg_io_ops) {
-		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+		pr_crit("ERROR: No KGDB I/O module available\n");
 		return;
 	}
 	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
 		if (!dbg_kdb_mode)
-			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+			pr_crit("KGDB or $3#33 for KDB\n");
#else
-		printk(KERN_CRIT "Entering KGDB\n");
+		pr_crit("Entering KGDB\n");
#endif
 	}
 
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
 {
 	kgdb_break_asap = 0;
 
-	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+	pr_crit("Waiting for connection from remote gdb...\n");
 	kgdb_breakpoint();
 }
 
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
 	if (dbg_io_ops) {
 		spin_unlock(&kgdb_registration_lock);
 
-		printk(KERN_ERR "kgdb: Another I/O driver is already "
-		       "registered with KGDB.\n");
+		pr_err("Another I/O driver is already registered with KGDB\n");
 		return -EBUSY;
 	}
 
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
 
 	spin_unlock(&kgdb_registration_lock);
 
-	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
-	       new_dbg_io_ops->name);
+	pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
 
 	/* Arm KGDB now. */
 	kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
 
 	spin_unlock(&kgdb_registration_lock);
 
-	printk(KERN_INFO
-	       "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+	pr_info("Unregistered I/O driver %s, debugger disabled\n",
 	       old_dbg_io_ops->name);
 }
 EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
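
Note: every printk() conversion in this file relies on the pr_fmt() macro added at the top; the pr_*() helpers splice pr_fmt(fmt) into their format string at compile time via C string-literal concatenation, which is why the literal "KGDB: " prefixes disappear from the call sites. A runnable userspace model (pr_err() here is a stand-in, not the kernel macro):

	#include <stdio.h>

	#define pr_fmt(fmt) "KGDB: " fmt
	/* userspace stand-in for the kernel's pr_err(); uses the GNU
	 * ##__VA_ARGS__ extension, as the kernel does */
	#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		pr_err("BP install failed: %lx\n", 0xc0ffeeUL);
		/* prints: KGDB: BP install failed: c0ffee */
		return 0;
	}
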
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index b20d544f20c2..e1dbf4a2c69e 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
 	for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
 		bp->bp_free = 1;
 
-	kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
-	  "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
-	  "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
+	  "Set/Display breakpoints", 0,
+	  KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
+	  "Display breakpoints", 0,
+	  KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
 	if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
-		kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
-	  "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("bc", kdb_bc, "<bpnum>",
-	  "Clear Breakpoint", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("be", kdb_bc, "<bpnum>",
-	  "Enable Breakpoint", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("bd", kdb_bc, "<bpnum>",
-	  "Disable Breakpoint", 0, KDB_REPEAT_NONE);
-
-	kdb_register_repeat("ss", kdb_ss, "",
-	  "Single Step", 1, KDB_REPEAT_NO_ARGS);
+		kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
+	  "[datar [length]|dataw [length]] Set hw brk", 0,
+	  KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("bc", kdb_bc, "<bpnum>",
+	  "Clear Breakpoint", 0,
+	  KDB_ENABLE_FLOW_CTRL);
+	kdb_register_flags("be", kdb_bc, "<bpnum>",
+	  "Enable Breakpoint", 0,
+	  KDB_ENABLE_FLOW_CTRL);
+	kdb_register_flags("bd", kdb_bc, "<bpnum>",
+	  "Disable Breakpoint", 0,
+	  KDB_ENABLE_FLOW_CTRL);
+
+	kdb_register_flags("ss", kdb_ss, "",
+	  "Single Step", 1,
+	  KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
 	/*
 	 * Architecture dependent initialization.
 	 */
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8859ca34dcfe..15e1a7af5dd0 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
 		ks->pass_exception = 1;
 		KDB_FLAG_SET(CATASTROPHIC);
 	}
+	/* set CATASTROPHIC if the system contains unresponsive processors */
+	for_each_online_cpu(i)
+		if (!kgdb_info[i].enter_kgdb)
+			KDB_FLAG_SET(CATASTROPHIC);
 	if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
 		KDB_STATE_CLEAR(SSBPT);
 		KDB_STATE_CLEAR(DOING_SS);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 379650b984f8..7b40c5f07dce 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -12,6 +12,7 @@
  */
 
 #include <linux/ctype.h>
+#include <linux/types.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
 #include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
 #include <linux/vmalloc.h>
 #include <linux/atomic.h>
 #include <linux/module.h>
+#include <linux/moduleparam.h>
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/kallsyms.h>
@@ -42,6 +44,12 @@
 #include <linux/slab.h>
 #include "kdb_private.h"
 
+#undef	MODULE_PARAM_PREFIX
+#define	MODULE_PARAM_PREFIX "kdb."
+
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
+
 #define GREP_LEN 256
 char kdb_grep_string[GREP_LEN];
 int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
 	KDBMSG(BADLENGTH, "Invalid length field"),
 	KDBMSG(NOBP, "No Breakpoint exists"),
 	KDBMSG(BADADDR, "Invalid address"),
+	KDBMSG(NOPERM, "Permission denied"),
 };
 #undef KDBMSG
 
@@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu)
 }
 
 /*
+ * Check whether the flags of the current command and the permissions
+ * of the kdb console allow a command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+				   bool no_args)
+{
+	/* permissions comes from userspace so needs massaging slightly */
+	permissions &= KDB_ENABLE_MASK;
+	permissions |= KDB_ENABLE_ALWAYS_SAFE;
+
+	/* some commands change group when launched with no arguments */
+	if (no_args)
+		permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+
+	flags |= KDB_ENABLE_ALL;
+
+	return permissions & flags;
+}
+
+/*
 * kdbgetenv - This function will return the character string value of
 *	an environment variable.
 * Parameters:
@@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
 	kdb_symtab_t symtab;
 
 	/*
+	 * If the enable flags prohibit both arbitrary memory access
+	 * and flow control then there are no reasonable grounds to
+	 * provide symbol lookup.
+	 */
+	if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+			     kdb_cmd_enabled, false))
+		return KDB_NOPERM;
+
+	/*
 	 * Process arguments which follow the following syntax:
 	 *
 	 *  symbol | numeric-address [+/- numeric-offset]
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
 		if (!s->count)
 			s->usable = 0;
 		if (s->usable)
-			kdb_register(s->name, kdb_exec_defcmd,
-				     s->usage, s->help, 0);
+			/* macros are always safe because when executed each
+			 * internal command re-enters kdb_parse() and is
+			 * safety checked individually.
+			 */
+			kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+					   s->help, 0,
+					   KDB_ENABLE_ALWAYS_SAFE);
 		return 0;
 	}
 	if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
 
 	if (i < kdb_max_commands) {
 		int result;
+
+		if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+			return KDB_NOPERM;
+
 		KDB_STATE_SET(CMD);
 		result = (*tp->cmd_func)(argc-1, (const char **)argv);
 		if (result && ignore_errors && result > KDB_CMD_GO)
 			result = 0;
 		KDB_STATE_CLEAR(CMD);
-		switch (tp->cmd_repeat) {
-		case KDB_REPEAT_NONE:
-			argc = 0;
-			if (argv[0])
-				*(argv[0]) = '\0';
-			break;
-		case KDB_REPEAT_NO_ARGS:
-			argc = 1;
-			if (argv[1])
-				*(argv[1]) = '\0';
-			break;
-		case KDB_REPEAT_WITH_ARGS:
-			break;
-		}
+
+		if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+			return result;
+
+		argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+		if (argv[argc])
+			*(argv[argc]) = '\0';
 		return result;
 	}
 
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
 */
 static int kdb_sr(int argc, const char **argv)
 {
+	bool check_mask =
+		!kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
+
 	if (argc != 1)
 		return KDB_ARGCOUNT;
+
 	kdb_trap_printk++;
-	__handle_sysrq(*argv[1], false);
+	__handle_sysrq(*argv[1], check_mask);
 	kdb_trap_printk--;
 
 	return 0;
@@ -1979,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
 		kdb_printf("%-20s%8u 0x%p ", mod->name,
 			   mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
-		kdb_printf("%4ld ", module_refcount(mod));
+		kdb_printf("%4d ", module_refcount(mod));
#endif
 		if (mod->state == MODULE_STATE_GOING)
 			kdb_printf(" (Unloading)");
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
 	for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
 		if (!cpu_online(i)) {
 			state = 'F';	/* cpu is offline */
+		} else if (!kgdb_info[i].enter_kgdb) {
+			state = 'D';	/* cpu is online but unresponsive */
 		} else {
 			state = ' ';	/* cpu is responding to kdb */
 			if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
 	/*
 	 * Validate cpunum
 	 */
-	if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+	if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
 		return KDB_BADCPUNUM;
 
 	dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
 			return 0;
 		if (!kt->cmd_name)
 			continue;
+		if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+			continue;
 		if (strlen(kt->cmd_usage) > 20)
 			space = "\n ";
 		kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
 }
 
 /*
- * kdb_register_repeat - This function is used to register a kernel
+ * kdb_register_flags - This function is used to register a kernel
 * 	debugger command.
 * Inputs:
 *	cmd	Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
 *	zero for success, one if a duplicate command.
 */
#define kdb_command_extend 50 /* arbitrary */
-int kdb_register_repeat(char *cmd,
-			kdb_func_t func,
-			char *usage,
-			char *help,
-			short minlen,
-			kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+		       kdb_func_t func,
+		       char *usage,
+		       char *help,
+		       short minlen,
+		       kdb_cmdflags_t flags)
 {
 	int i;
 	kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
 	kp->cmd_func = func;
 	kp->cmd_usage = usage;
 	kp->cmd_help = help;
-	kp->cmd_flags = 0;
 	kp->cmd_minlen = minlen;
-	kp->cmd_repeat = repeat;
+	kp->cmd_flags = flags;
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
 
 
 /*
 * kdb_register - Compatibility register function for commands that do
 *	not need to specify a repeat state.  Equivalent to
- *	kdb_register_repeat with KDB_REPEAT_NONE.
+ *	kdb_register_flags with flags set to 0.
 * Inputs:
 *	cmd	Command name
 *	func	Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
 		 char *help,
 		 short minlen)
 {
-	return kdb_register_repeat(cmd, func, usage, help, minlen,
-				   KDB_REPEAT_NONE);
+	return kdb_register_flags(cmd, func, usage, help, minlen, 0);
 }
 EXPORT_SYMBOL_GPL(kdb_register);
 
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
 	for_each_kdbcmd(kp, i)
 		kp->cmd_name = NULL;
 
-	kdb_register_repeat("md", kdb_md, "<vaddr>",
-	  "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
-	  KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
-	  "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
-	  "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mds", kdb_md, "<vaddr>",
-	  "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
-	  "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
-	kdb_register_repeat("go", kdb_go, "[<vaddr>]",
-	  "Continue Execution", 1, KDB_REPEAT_NONE);
-	kdb_register_repeat("rd", kdb_rd, "",
-	  "Display Registers", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
-	  "Modify Registers", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("ef", kdb_ef, "<vaddr>",
-	  "Display exception frame", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
-	  "Stack traceback", 1, KDB_REPEAT_NONE);
-	kdb_register_repeat("btp", kdb_bt, "<pid>",
-	  "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
-	  "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("btc", kdb_bt, "",
-	  "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+	kdb_register_flags("md", kdb_md, "<vaddr>",
+	  "Display Memory Contents, also mdWcN, e.g. md8c1", 1,
+	  KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+	  "Display Raw Memory", 0,
+	  KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+	  "Display Physical Memory", 0,
+	  KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("mds", kdb_md, "<vaddr>",
+	  "Display Memory Symbolically", 0,
+	  KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+	  "Modify Memory Contents", 0,
+	  KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+	kdb_register_flags("go", kdb_go, "[<vaddr>]",
+	  "Continue Execution", 1,
+	  KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+	kdb_register_flags("rd", kdb_rd, "",
+	  "Display Registers", 0,
+	  KDB_ENABLE_REG_READ);
+	kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+	  "Modify Registers", 0,
+	  KDB_ENABLE_REG_WRITE);
+	kdb_register_flags("ef", kdb_ef, "<vaddr>",
+	  "Display exception frame", 0,
+	  KDB_ENABLE_MEM_READ);
+	kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+	  "Stack traceback", 1,
+	  KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+	kdb_register_flags("btp", kdb_bt, "<pid>",
+	  "Display stack for process <pid>", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+	  "Backtrace all processes matching state flag", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("btc", kdb_bt, "",
+	  "Backtrace current process on each cpu", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("btt", kdb_bt, "<vaddr>",
 	  "Backtrace process given its struct task address", 0,
-	  KDB_REPEAT_NONE);
-	kdb_register_repeat("env", kdb_env, "",
-	  "Show environment variables", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("set", kdb_set, "",
-	  "Set environment variables", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("help", kdb_help, "",
-	  "Display Help Message", 1, KDB_REPEAT_NONE);
-	kdb_register_repeat("?", kdb_help, "",
-	  "Display Help Message", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
-	  "Switch to new cpu", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("kgdb", kdb_kgdb, "",
-	  "Enter kgdb mode", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
-	  "Display active task list", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("pid", kdb_pid, "<pidnum>",
-	  "Switch to another task", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("reboot", kdb_reboot, "",
-	  "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+	  KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+	kdb_register_flags("env", kdb_env, "",
+	  "Show environment variables", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("set", kdb_set, "",
+	  "Set environment variables", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("help", kdb_help, "",
+	  "Display Help Message", 1,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("?", kdb_help, "",
+	  "Display Help Message", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+	  "Switch to new cpu", 0,
+	  KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+	kdb_register_flags("kgdb", kdb_kgdb, "",
+	  "Enter kgdb mode", 0, 0);
+	kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+	  "Display active task list", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("pid", kdb_pid, "<pidnum>",
+	  "Switch to another task", 0,
+	  KDB_ENABLE_INSPECT);
+	kdb_register_flags("reboot", kdb_reboot, "",
+	  "Reboot the machine immediately", 0,
+	  KDB_ENABLE_REBOOT);
#if defined(CONFIG_MODULES)
-	kdb_register_repeat("lsmod", kdb_lsmod, "",
-	  "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("lsmod", kdb_lsmod, "",
+	  "List loaded kernel modules", 0,
+	  KDB_ENABLE_INSPECT);
#endif
#if defined(CONFIG_MAGIC_SYSRQ)
-	kdb_register_repeat("sr", kdb_sr, "<key>",
-	  "Magic SysRq key", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("sr", kdb_sr, "<key>",
+	  "Magic SysRq key", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
#endif
#if defined(CONFIG_PRINTK)
-	kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
-	  "Display syslog buffer", 0, KDB_REPEAT_NONE);
+	kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+	  "Display syslog buffer", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
#endif
 	if (arch_kgdb_ops.enable_nmi) {
-		kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
-	  "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
-	}
-	kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
-	  "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
-	  "Send a signal to a process", 0, KDB_REPEAT_NONE);
-	kdb_register_repeat("summary", kdb_summary, "",
-	  "Summarize the system", 4, KDB_REPEAT_NONE);
-	kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
-	  "Display per_cpu variables", 3, KDB_REPEAT_NONE);
-	kdb_register_repeat("grephelp", kdb_grep_help, "",
-	  "Display help on | grep", 0, KDB_REPEAT_NONE);
+		kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+	  "Disable NMI entry to KDB", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	}
+	kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+	  "Define a set of commands, down to endefcmd", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+	  "Send a signal to a process", 0,
+	  KDB_ENABLE_SIGNAL);
+	kdb_register_flags("summary", kdb_summary, "",
+	  "Summarize the system", 4,
+	  KDB_ENABLE_ALWAYS_SAFE);
+	kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+	  "Display per_cpu variables", 3,
+	  KDB_ENABLE_MEM_READ);
+	kdb_register_flags("grephelp", kdb_grep_help, "",
+	  "Display help on | grep", 0,
+	  KDB_ENABLE_ALWAYS_SAFE);
 }
 
 /* Execute any commands defined in kdb_cmds. */
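
Note: the core of the new permission scheme is kdb_check_flags() above: the command's flags and the console's kdb.cmd_enable mask are bitmasks that must intersect, after "always safe" permissions are forced on and the mask is widened for no-argument invocations. A standalone model of that logic with made-up bit values (the real ones live in include/linux/kdb.h):

	#include <stdbool.h>
	#include <stdio.h>

	#define ENABLE_ALL	   (1u << 0)	/* master-enable permission bit */
	#define ENABLE_MEM_READ	   (1u << 1)
	#define ENABLE_MEM_WRITE   (1u << 2)
	#define ENABLE_ALWAYS_SAFE (1u << 9)
	#define ENABLE_MASK	   ((1u << 10) - 1)
	#define NO_ARGS_SHIFT	   10		/* no-arg variants live in the high half */

	static bool check_flags(unsigned cmd_flags, unsigned permissions, bool no_args)
	{
		permissions &= ENABLE_MASK;		/* sanitize userspace input */
		permissions |= ENABLE_ALWAYS_SAFE;	/* safe commands always run */
		if (no_args)				/* no-arg form may be laxer */
			permissions |= permissions << NO_ARGS_SHIFT;
		cmd_flags |= ENABLE_ALL;		/* an ENABLE_ALL permission matches everything */
		return permissions & cmd_flags;
	}

	int main(void)
	{
		/* console booted with kdb.cmd_enable = ENABLE_MEM_READ */
		printf("%d\n", check_flags(ENABLE_MEM_READ, ENABLE_MEM_READ, false));	/* 1: allowed */
		printf("%d\n", check_flags(ENABLE_MEM_WRITE, ENABLE_MEM_READ, false));	/* 0: KDB_NOPERM */
		return 0;
	}
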
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 7afd3c8c41d5..eaacd1693954 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
 	kdb_func_t cmd_func;		/* Function to execute command */
 	char *cmd_usage;		/* Usage String for this command */
 	char *cmd_help;			/* Help message for this command */
-	short cmd_flags;		/* Parsing flags */
 	short cmd_minlen;		/* Minimum legal # command
 					 * chars required */
-	kdb_repeat_t cmd_repeat;	/* Does command auto repeat on enter? */
+	kdb_cmdflags_t cmd_flags;	/* Command behaviour flags */
 } kdbtab_t;
 
 extern int kdb_bt(int, const char **);	/* KDB display back trace */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c1ee7f2bebc..882f835a0d85 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
 }
 
 static void perf_sample_regs_user(struct perf_regs *regs_user,
-				  struct pt_regs *regs)
+				  struct pt_regs *regs,
+				  struct pt_regs *regs_user_copy)
 {
-	if (!user_mode(regs)) {
-		if (current->mm)
-			regs = task_pt_regs(current);
-		else
-			regs = NULL;
-	}
-
-	if (regs) {
-		regs_user->abi = perf_reg_abi(current);
+	if (user_mode(regs)) {
+		regs_user->abi = perf_reg_abi(current);
 		regs_user->regs = regs;
+	} else if (current->mm) {
+		perf_get_regs_user(regs_user, regs, regs_user_copy);
 	} else {
 		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
 		regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
 	}
 
 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
-		perf_sample_regs_user(&data->regs_user, regs);
+		perf_sample_regs_user(&data->regs_user, regs,
+				      &data->regs_user_copy);
 
 	if (sample_type & PERF_SAMPLE_REGS_USER) {
 		/* regs dump ABI info */
diff --git a/kernel/exit.c b/kernel/exit.c
index 1ea4369890a3..6806c55475ee 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 static int wait_consider_task(struct wait_opts *wo, int ptrace,
 				struct task_struct *p)
 {
+	/*
+	 * We can race with wait_task_zombie() from another thread.
+	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+	 * can't confuse the checks below.
+	 */
+	int exit_state = ACCESS_ONCE(p->exit_state);
 	int ret;
 
-	if (unlikely(p->exit_state == EXIT_DEAD))
+	if (unlikely(exit_state == EXIT_DEAD))
 		return 0;
 
 	ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 		return 0;
 	}
 
-	if (unlikely(p->exit_state == EXIT_TRACE)) {
+	if (unlikely(exit_state == EXIT_TRACE)) {
 		/*
 		 * ptrace == 0 means we are the natural parent. In this case
 		 * we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 	}
 
 	/* slay zombie? */
-	if (p->exit_state == EXIT_ZOMBIE) {
+	if (exit_state == EXIT_ZOMBIE) {
 		/* we don't reap group leaders with subthreads */
 		if (!delay_group_leader(p)) {
 			/*
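
Note: the only functional change here is reading p->exit_state exactly once. With plain loads the compiler is free to re-read the field at each comparison, so a concurrent wait_task_zombie() flipping EXIT_ZOMBIE to EXIT_DEAD between two reads could make every branch miss. A compilable model of what ACCESS_ONCE() does (GNU C; the kernel's version lives in <linux/compiler.h>, and the enum values below are the real kernel ones):

	/* a volatile read the compiler can neither merge nor repeat */
	#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

	enum { EXIT_DEAD = 16, EXIT_ZOMBIE = 32, EXIT_TRACE = 48 };
	struct task { int exit_state; };

	int classify(struct task *p)
	{
		int exit_state = ACCESS_ONCE(p->exit_state);	/* one load... */

		if (exit_state == EXIT_DEAD)	/* ...shared by every check, so */
			return 0;		/* the checks can never disagree */
		if (exit_state == EXIT_TRACE)	/* with each other mid-function */
			return 1;
		return exit_state == EXIT_ZOMBIE ? 2 : 3;
	}
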
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 06f58309fed2..ee619929cf90 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
 
 static void free_insn_page(void *page)
 {
-	module_free(NULL, page);
+	module_memfree(page);
 }
 
 struct kprobe_insn_cache kprobe_insn_slots = {
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 5cf6731b98e9..3ef3736002d8 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
 		DEBUG_LOCKS_WARN_ON(lock->owner != current);
 
 		DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-		mutex_clear_owner(lock);
 	}
 
 	/*
 	 * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
 	 * mutexes so that we can do it here after we've verified state.
 	 */
+	mutex_clear_owner(lock);
 	atomic_set(&lock->count, 1);
 }
 
diff --git a/kernel/module.c b/kernel/module.c
index 3965511ae133..d856e96a3cce 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 	return 0;
 }
 
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod:	the module we're checking
+ *
+ * Returns:
+ *	-1 if the module is in the process of unloading
+ *	otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
 {
-	return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 }
 EXPORT_SYMBOL(module_refcount);
 
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
 	struct module_use *use;
 	int printed_something = 0;
 
-	seq_printf(m, " %lu ", module_refcount(mod));
+	seq_printf(m, " %i ", module_refcount(mod));
 
 	/*
 	 * Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
 static ssize_t show_refcnt(struct module_attribute *mattr,
 			   struct module_kobject *mk, char *buffer)
 {
-	return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
 }
 
 static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
 static void unset_module_init_ro_nx(struct module *mod) { }
 #endif
 
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
 {
 	vfree(module_region);
 }
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
 {
 }
 
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
 /* Free a module, remove from lists, etc. */
 static void free_module(struct module *mod)
 {
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
 
 	/* This may be NULL, but that's OK */
 	unset_module_init_ro_nx(mod);
-	module_free(mod, mod->module_init);
+	module_arch_freeing_init(mod);
+	module_memfree(mod->module_init);
 	kfree(mod->args);
 	percpu_modfree(mod);
 
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
 
 	/* Finally, free the core (containing the module structure) */
 	unset_module_core_ro_nx(mod);
-	module_free(mod, mod->module_core);
+	module_memfree(mod->module_core);
 
#ifdef CONFIG_MPU
 	update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
 	 */
 	kmemleak_ignore(ptr);
 	if (!ptr) {
-		module_free(mod, mod->module_core);
+		module_memfree(mod->module_core);
 		return -ENOMEM;
 	}
 	memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
 static void module_deallocate(struct module *mod, struct load_info *info)
 {
 	percpu_modfree(mod);
-	module_free(mod, mod->module_init);
-	module_free(mod, mod->module_core);
+	module_arch_freeing_init(mod);
+	module_memfree(mod->module_init);
+	module_memfree(mod->module_core);
 }
 
 int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
#endif
 }
 
+/* For freeing module_init on success, in case kallsyms traversing */
+struct mod_initfree {
+	struct rcu_head rcu;
+	void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+	module_memfree(m->module_init);
+	kfree(m);
+}
+
 /* This is where the real work happens */
 static int do_init_module(struct module *mod)
 {
 	int ret = 0;
+	struct mod_initfree *freeinit;
+
+	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+	if (!freeinit) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	freeinit->module_init = mod->module_init;
 
 	/*
 	 * We want to find out whether @mod uses async during init. Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
 	if (mod->init != NULL)
 		ret = do_one_initcall(mod->init);
 	if (ret < 0) {
-		/*
-		 * Init routine failed: abort. Try to protect us from
-		 * buggy refcounters.
-		 */
-		mod->state = MODULE_STATE_GOING;
-		synchronize_sched();
-		module_put(mod);
-		blocking_notifier_call_chain(&module_notify_list,
-					     MODULE_STATE_GOING, mod);
-		free_module(mod);
-		wake_up_all(&module_wq);
-		return ret;
+		goto fail_free_freeinit;
 	}
 	if (ret > 0) {
 		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
 	mod->strtab = mod->core_strtab;
#endif
 	unset_module_init_ro_nx(mod);
-	module_free(mod, mod->module_init);
+	module_arch_freeing_init(mod);
 	mod->module_init = NULL;
 	mod->init_size = 0;
 	mod->init_ro_size = 0;
 	mod->init_text_size = 0;
+	/*
+	 * We want to free module_init, but be aware that kallsyms may be
+	 * walking this with preempt disabled.  In all the failure paths,
+	 * we call synchronize_rcu/synchronize_sched, but we don't want
+	 * to slow down the success path, so use actual RCU here.
+	 */
+	call_rcu(&freeinit->rcu, do_free_init);
 	mutex_unlock(&module_mutex);
 	wake_up_all(&module_wq);
 
 	return 0;
+
+fail_free_freeinit:
+	kfree(freeinit);
+fail:
+	/* Try to protect us from buggy refcounters. */
+	mod->state = MODULE_STATE_GOING;
+	synchronize_sched();
+	module_put(mod);
+	blocking_notifier_call_chain(&module_notify_list,
+				     MODULE_STATE_GOING, mod);
+	free_module(mod);
+	wake_up_all(&module_wq);
+	return ret;
 }
 
 static int may_init_module(void)
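
Note on the module_refcount() change: MODULE_REF_BASE is 1, and a module that is being unloaded has dropped its base reference, so refcnt - MODULE_REF_BASE is -1 exactly then. With the old unsigned return type that -1 became a huge number in /proc/modules and sysfs. A standalone illustration:

	#include <stdio.h>

	int main(void)
	{
		long refcnt = 0, MODULE_REF_BASE = 1;	/* mid-unload state */

		/* old: unsigned subtraction wraps when printed with %lu */
		printf("%lu\n", (unsigned long)refcnt - MODULE_REF_BASE);
		/* 18446744073709551615 on 64-bit */

		/* new: signed result reports the sentinel sanely with %i */
		printf("%i\n", (int)(refcnt - MODULE_REF_BASE));	/* -1 */
		return 0;
	}
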
diff --git a/kernel/params.c b/kernel/params.c
index 0af9b2c4e56c..728e05b167de 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
 	mk->mp->grp.attrs = new_attrs;
 
 	/* Tack new one on the end. */
+	memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
 	sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
 	mk->mp->attrs[mk->mp->num].param = kp;
 	mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
 	/* Do not allow runtime DAC changes to make param writable. */
 	if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
 		mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+	else
+		mk->mp->attrs[mk->mp->num].mattr.store = NULL;
 	mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
 	mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
 	mk->mp->num++;
diff --git a/kernel/range.c b/kernel/range.c index 322ea8e93e4b..82cfc285b046 100644 --- a/kernel/range.c +++ b/kernel/range.c | |||
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2) | |||
113 | { | 113 | { |
114 | const struct range *r1 = x1; | 114 | const struct range *r1 = x1; |
115 | const struct range *r2 = x2; | 115 | const struct range *r2 = x2; |
116 | s64 start1, start2; | ||
117 | 116 | ||
118 | start1 = r1->start; | 117 | if (r1->start < r2->start) |
119 | start2 = r2->start; | 118 | return -1; |
120 | 119 | if (r1->start > r2->start) | |
121 | return start1 - start2; | 120 | return 1; |
121 | return 0; | ||
122 | } | 122 | } |
123 | 123 | ||
124 | int clean_sort_range(struct range *range, int az) | 124 | int clean_sort_range(struct range *range, int az) |
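The cmp_range() rewrite above fixes a classic comparator bug: returning an s64 difference through the comparator's int result truncates the high bits, so two starts exactly 2^32 apart compare as equal and sort() can order ranges incorrectly. A standalone demonstration:

    #include <stdint.h>
    #include <stdio.h>

    /* Buggy form: the 64-bit difference is truncated to int on return. */
    static int cmp_bad(const void *x1, const void *x2)
    {
            int64_t s1 = *(const int64_t *)x1, s2 = *(const int64_t *)x2;
            return s1 - s2;         /* low 32 bits only: wrong for big gaps */
    }

    /* Fixed form: explicit three-way comparison, as in the patch. */
    static int cmp_good(const void *x1, const void *x2)
    {
            int64_t s1 = *(const int64_t *)x1, s2 = *(const int64_t *)x2;

            if (s1 < s2)
                    return -1;
            if (s1 > s2)
                    return 1;
            return 0;
    }

    int main(void)
    {
            int64_t a = 0, b = (int64_t)1 << 32;    /* gap is exactly 2^32 */

            /* prints "bad: 0  good: -1": the buggy comparator ties them */
            printf("bad: %d  good: %d\n", cmp_bad(&a, &b), cmp_good(&a, &b));
            return 0;
    }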
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b5797b78add6..c0accc00566e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -7113,9 +7113,6 @@ void __init sched_init(void) | |||
7113 | #ifdef CONFIG_RT_GROUP_SCHED | 7113 | #ifdef CONFIG_RT_GROUP_SCHED |
7114 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); | 7114 | alloc_size += 2 * nr_cpu_ids * sizeof(void **); |
7115 | #endif | 7115 | #endif |
7116 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
7117 | alloc_size += num_possible_cpus() * cpumask_size(); | ||
7118 | #endif | ||
7119 | if (alloc_size) { | 7116 | if (alloc_size) { |
7120 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); | 7117 | ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); |
7121 | 7118 | ||
@@ -7135,13 +7132,13 @@ void __init sched_init(void) | |||
7135 | ptr += nr_cpu_ids * sizeof(void **); | 7132 | ptr += nr_cpu_ids * sizeof(void **); |
7136 | 7133 | ||
7137 | #endif /* CONFIG_RT_GROUP_SCHED */ | 7134 | #endif /* CONFIG_RT_GROUP_SCHED */ |
7135 | } | ||
7138 | #ifdef CONFIG_CPUMASK_OFFSTACK | 7136 | #ifdef CONFIG_CPUMASK_OFFSTACK |
7139 | for_each_possible_cpu(i) { | 7137 | for_each_possible_cpu(i) { |
7140 | per_cpu(load_balance_mask, i) = (void *)ptr; | 7138 | per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( |
7141 | ptr += cpumask_size(); | 7139 | cpumask_size(), GFP_KERNEL, cpu_to_node(i)); |
7142 | } | ||
7143 | #endif /* CONFIG_CPUMASK_OFFSTACK */ | ||
7144 | } | 7140 | } |
7141 | #endif /* CONFIG_CPUMASK_OFFSTACK */ | ||
7145 | 7142 | ||
7146 | init_rt_bandwidth(&def_rt_bandwidth, | 7143 | init_rt_bandwidth(&def_rt_bandwidth, |
7147 | global_rt_period(), global_rt_runtime()); | 7144 | global_rt_period(), global_rt_runtime()); |
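With the sched/core.c hunk above, the CPUMASK_OFFSTACK masks are no longer carved out of the single kzalloc() block, which was also guarded by if (alloc_size) and so only ran when group scheduling was configured. Each mask now gets its own small, node-local allocation, presumably so the combined request cannot grow past the maximum kmalloc size on very large systems. A sketch of the resulting pattern (taken directly from the hunk):

    #ifdef CONFIG_CPUMASK_OFFSTACK
            for_each_possible_cpu(i)
                    /* one allocation per CPU, placed on that CPU's node */
                    per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
                                    cpumask_size(), GFP_KERNEL, cpu_to_node(i));
    #endif /* CONFIG_CPUMASK_OFFSTACK */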
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e5db8c6feebd..b52092f2636d 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) | |||
570 | static | 570 | static |
571 | int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) | 571 | int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) |
572 | { | 572 | { |
573 | int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); | 573 | return (dl_se->runtime <= 0); |
574 | int rorun = dl_se->runtime <= 0; | ||
575 | |||
576 | if (!rorun && !dmiss) | ||
577 | return 0; | ||
578 | |||
579 | /* | ||
580 | * If we are beyond our current deadline and we are still | ||
581 | * executing, then we have already used some of the runtime of | ||
582 | * the next instance. Thus, if we do not account that, we are | ||
583 | * stealing bandwidth from the system at each deadline miss! | ||
584 | */ | ||
585 | if (dmiss) { | ||
586 | dl_se->runtime = rorun ? dl_se->runtime : 0; | ||
587 | dl_se->runtime -= rq_clock(rq) - dl_se->deadline; | ||
588 | } | ||
589 | |||
590 | return 1; | ||
591 | } | 574 | } |
592 | 575 | ||
593 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); | 576 | extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); |
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, | |||
826 | * parameters of the task might need updating. Otherwise, | 809 | * parameters of the task might need updating. Otherwise, |
827 | * we want a replenishment of its runtime. | 810 | * we want a replenishment of its runtime. |
828 | */ | 811 | */ |
829 | if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) | 812 | if (dl_se->dl_new || flags & ENQUEUE_WAKEUP) |
830 | replenish_dl_entity(dl_se, pi_se); | ||
831 | else | ||
832 | update_dl_entity(dl_se, pi_se); | 813 | update_dl_entity(dl_se, pi_se); |
814 | else if (flags & ENQUEUE_REPLENISH) | ||
815 | replenish_dl_entity(dl_se, pi_se); | ||
833 | 816 | ||
834 | __enqueue_dl_entity(dl_se); | 817 | __enqueue_dl_entity(dl_se); |
835 | } | 818 | } |
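Two behavioural points in the deadline.c hunks are easy to miss: dl_runtime_exceeded() now throttles purely on depleted runtime, dropping the deadline-miss accounting, and enqueue_dl_entity() now keys on the kind of enqueue rather than on the absence of ENQUEUE_REPLENISH. An annotated restatement of the new dispatch, names as in the patch, interpretation hedged:

    /* A brand-new entity, or one waking from sleep, must have its
     * parameters re-evaluated (and possibly a fresh deadline posted)
     * before it is queued; a requeue driven by the replenishment timer
     * instead tops up the runtime for the next period. */
    if (dl_se->dl_new || (flags & ENQUEUE_WAKEUP))
            update_dl_entity(dl_se, pi_se);
    else if (flags & ENQUEUE_REPLENISH)
            replenish_dl_entity(dl_se, pi_se);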
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index df2cdf77f899..40667cbf371b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force) | |||
4005 | 4005 | ||
4006 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) | 4006 | static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) |
4007 | { | 4007 | { |
4008 | /* init_cfs_bandwidth() was not called */ | ||
4009 | if (!cfs_b->throttled_cfs_rq.next) | ||
4010 | return; | ||
4011 | |||
4008 | hrtimer_cancel(&cfs_b->period_timer); | 4012 | hrtimer_cancel(&cfs_b->period_timer); |
4009 | hrtimer_cancel(&cfs_b->slack_timer); | 4013 | hrtimer_cancel(&cfs_b->slack_timer); |
4010 | } | 4014 | } |
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) | |||
4424 | * wl = S * s'_i; see (2) | 4428 | * wl = S * s'_i; see (2) |
4425 | */ | 4429 | */ |
4426 | if (W > 0 && w < W) | 4430 | if (W > 0 && w < W) |
4427 | wl = (w * tg->shares) / W; | 4431 | wl = (w * (long)tg->shares) / W; |
4428 | else | 4432 | else |
4429 | wl = tg->shares; | 4433 | wl = tg->shares; |
4430 | 4434 | ||
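The (long)tg->shares cast in the fair.c hunk guards against C's usual arithmetic conversions: tg->shares is unsigned long, so a negative weight w is converted to a huge unsigned value before the multiply, and the division then yields a large positive result instead of a negative one. A standalone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            long w = -512, W = 2048;
            unsigned long shares = 1024;

            /* w is converted to unsigned long: huge positive result */
            printf("unsigned math: %ld\n", (long)((w * shares) / W));

            /* with the cast the arithmetic stays signed: prints -256 */
            printf("signed math:   %ld\n", (w * (long)shares) / W);
            return 0;
    }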
diff --git a/kernel/sys.c b/kernel/sys.c index a8c9f5a7dda6..ea9c88109894 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, | |||
2210 | up_write(&me->mm->mmap_sem); | 2210 | up_write(&me->mm->mmap_sem); |
2211 | break; | 2211 | break; |
2212 | case PR_MPX_ENABLE_MANAGEMENT: | 2212 | case PR_MPX_ENABLE_MANAGEMENT: |
2213 | if (arg2 || arg3 || arg4 || arg5) | ||
2214 | return -EINVAL; | ||
2213 | error = MPX_ENABLE_MANAGEMENT(me); | 2215 | error = MPX_ENABLE_MANAGEMENT(me); |
2214 | break; | 2216 | break; |
2215 | case PR_MPX_DISABLE_MANAGEMENT: | 2217 | case PR_MPX_DISABLE_MANAGEMENT: |
2218 | if (arg2 || arg3 || arg4 || arg5) | ||
2219 | return -EINVAL; | ||
2216 | error = MPX_DISABLE_MANAGEMENT(me); | 2220 | error = MPX_DISABLE_MANAGEMENT(me); |
2217 | break; | 2221 | break; |
2218 | default: | 2222 | default: |
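The prctl() hunk applies a standard ABI-hardening rule: an option that takes no arguments must reject nonzero spare arguments today, so those argument slots can be given meaning later without breaking old userspace that passed garbage. A hypothetical handler fragment showing the pattern (names are illustrative):

    /* Hypothetical option handler: arg2..arg5 are reserved. */
    static long prctl_no_args(unsigned long arg2, unsigned long arg3,
                              unsigned long arg4, unsigned long arg5)
    {
            if (arg2 || arg3 || arg4 || arg5)
                    return -EINVAL;         /* reserved, must be zero for now */
            return 0;
    }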
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 87a346fd6d61..28bf91c60a0b 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc) | |||
633 | if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) | 633 | if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) |
634 | return -EPERM; | 634 | return -EPERM; |
635 | 635 | ||
636 | if (txc->modes & ADJ_FREQUENCY) { | ||
637 | if (LONG_MIN / PPM_SCALE > txc->freq) | ||
638 | return -EINVAL; | ||
639 | if (LONG_MAX / PPM_SCALE < txc->freq) | ||
640 | return -EINVAL; | ||
641 | } | ||
642 | |||
636 | return 0; | 643 | return 0; |
637 | } | 644 | } |
638 | 645 | ||
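The ntp_validate_timex() hunk rejects ADJ_FREQUENCY values whose later multiplication by PPM_SCALE would overflow a long; comparing against the divided limit performs the check without ever triggering the overflow itself. A userspace sketch of the idiom (the PPM_SCALE value here is illustrative, not the kernel's):

    #include <limits.h>
    #include <stdio.h>

    #define PPM_SCALE 65536L        /* illustrative scale factor */

    /* True iff freq * PPM_SCALE fits in a long: divide the bounds
     * instead of multiplying the input. */
    static int freq_in_range(long freq)
    {
            return freq >= LONG_MIN / PPM_SCALE &&
                   freq <= LONG_MAX / PPM_SCALE;
    }

    int main(void)
    {
            /* prints "1 0": a sane value passes, LONG_MAX is rejected */
            printf("%d %d\n", freq_in_range(500), freq_in_range(LONG_MAX));
            return 0;
    }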
diff --git a/kernel/time/time.c b/kernel/time/time.c index 6390517e77d4..2c85b7724af4 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c | |||
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv, | |||
196 | if (tv) { | 196 | if (tv) { |
197 | if (copy_from_user(&user_tv, tv, sizeof(*tv))) | 197 | if (copy_from_user(&user_tv, tv, sizeof(*tv))) |
198 | return -EFAULT; | 198 | return -EFAULT; |
199 | |||
200 | if (!timeval_valid(&user_tv)) | ||
201 | return -EINVAL; | ||
202 | |||
199 | new_ts.tv_sec = user_tv.tv_sec; | 203 | new_ts.tv_sec = user_tv.tv_sec; |
200 | new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; | 204 | new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; |
201 | } | 205 | } |
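The settimeofday() hunk closes a hole where an unnormalized or negative timeval could reach the timekeeping core. A userspace sketch of the kind of check timeval_valid() performs (the exact bounds are inferred from the usual definition; treat them as an assumption):

    #include <sys/time.h>

    #define USEC_PER_SEC 1000000L

    /* Reject pre-epoch seconds and out-of-range microseconds. */
    static int timeval_valid_sketch(const struct timeval *tv)
    {
            if (tv->tv_sec < 0)
                    return 0;
            if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
                    return 0;
            return 1;
    }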
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 929a733d302e..224e768bdc73 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command) | |||
2497 | } | 2497 | } |
2498 | 2498 | ||
2499 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, | 2499 | static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, |
2500 | struct ftrace_hash *old_hash) | 2500 | struct ftrace_ops_hash *old_hash) |
2501 | { | 2501 | { |
2502 | ops->flags |= FTRACE_OPS_FL_MODIFYING; | 2502 | ops->flags |= FTRACE_OPS_FL_MODIFYING; |
2503 | ops->old_hash.filter_hash = old_hash; | 2503 | ops->old_hash.filter_hash = old_hash->filter_hash; |
2504 | ops->old_hash.notrace_hash = old_hash->notrace_hash; | ||
2504 | ftrace_run_update_code(command); | 2505 | ftrace_run_update_code(command); |
2505 | ops->old_hash.filter_hash = NULL; | 2506 | ops->old_hash.filter_hash = NULL; |
2507 | ops->old_hash.notrace_hash = NULL; | ||
2506 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; | 2508 | ops->flags &= ~FTRACE_OPS_FL_MODIFYING; |
2507 | } | 2509 | } |
2508 | 2510 | ||
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly = | |||
3579 | 3581 | ||
3580 | static int ftrace_probe_registered; | 3582 | static int ftrace_probe_registered; |
3581 | 3583 | ||
3582 | static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash) | 3584 | static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) |
3583 | { | 3585 | { |
3584 | int ret; | 3586 | int ret; |
3585 | int i; | 3587 | int i; |
@@ -3637,6 +3639,7 @@ int | |||
3637 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | 3639 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, |
3638 | void *data) | 3640 | void *data) |
3639 | { | 3641 | { |
3642 | struct ftrace_ops_hash old_hash_ops; | ||
3640 | struct ftrace_func_probe *entry; | 3643 | struct ftrace_func_probe *entry; |
3641 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | 3644 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
3642 | struct ftrace_hash *old_hash = *orig_hash; | 3645 | struct ftrace_hash *old_hash = *orig_hash; |
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3658 | 3661 | ||
3659 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); | 3662 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); |
3660 | 3663 | ||
3664 | old_hash_ops.filter_hash = old_hash; | ||
3665 | /* Probes only have filters */ | ||
3666 | old_hash_ops.notrace_hash = NULL; | ||
3667 | |||
3661 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); | 3668 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); |
3662 | if (!hash) { | 3669 | if (!hash) { |
3663 | count = -ENOMEM; | 3670 | count = -ENOMEM; |
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3718 | 3725 | ||
3719 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | 3726 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); |
3720 | 3727 | ||
3721 | __enable_ftrace_function_probe(old_hash); | 3728 | __enable_ftrace_function_probe(&old_hash_ops); |
3722 | 3729 | ||
3723 | if (!ret) | 3730 | if (!ret) |
3724 | free_ftrace_hash_rcu(old_hash); | 3731 | free_ftrace_hash_rcu(old_hash); |
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | |||
4006 | } | 4013 | } |
4007 | 4014 | ||
4008 | static void ftrace_ops_update_code(struct ftrace_ops *ops, | 4015 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
4009 | struct ftrace_hash *old_hash) | 4016 | struct ftrace_ops_hash *old_hash) |
4010 | { | 4017 | { |
4011 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) | 4018 | struct ftrace_ops *op; |
4019 | |||
4020 | if (!ftrace_enabled) | ||
4021 | return; | ||
4022 | |||
4023 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { | ||
4012 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); | 4024 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); |
4025 | return; | ||
4026 | } | ||
4027 | |||
4028 | /* | ||
4029 | * If this is the shared global_ops filter, then we need to | ||
4030 | * check if another ops that shares it is enabled. | ||
4031 | * If so, we still need to run the modify code. | ||
4032 | */ | ||
4033 | if (ops->func_hash != &global_ops.local_hash) | ||
4034 | return; | ||
4035 | |||
4036 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
4037 | if (op->func_hash == &global_ops.local_hash && | ||
4038 | op->flags & FTRACE_OPS_FL_ENABLED) { | ||
4039 | ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); | ||
4040 | /* Only need to do this once */ | ||
4041 | return; | ||
4042 | } | ||
4043 | } while_for_each_ftrace_op(op); | ||
4013 | } | 4044 | } |
4014 | 4045 | ||
4015 | static int | 4046 | static int |
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
4017 | unsigned long ip, int remove, int reset, int enable) | 4048 | unsigned long ip, int remove, int reset, int enable) |
4018 | { | 4049 | { |
4019 | struct ftrace_hash **orig_hash; | 4050 | struct ftrace_hash **orig_hash; |
4051 | struct ftrace_ops_hash old_hash_ops; | ||
4020 | struct ftrace_hash *old_hash; | 4052 | struct ftrace_hash *old_hash; |
4021 | struct ftrace_hash *hash; | 4053 | struct ftrace_hash *hash; |
4022 | int ret; | 4054 | int ret; |
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
4053 | 4085 | ||
4054 | mutex_lock(&ftrace_lock); | 4086 | mutex_lock(&ftrace_lock); |
4055 | old_hash = *orig_hash; | 4087 | old_hash = *orig_hash; |
4088 | old_hash_ops.filter_hash = ops->func_hash->filter_hash; | ||
4089 | old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; | ||
4056 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | 4090 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); |
4057 | if (!ret) { | 4091 | if (!ret) { |
4058 | ftrace_ops_update_code(ops, old_hash); | 4092 | ftrace_ops_update_code(ops, &old_hash_ops); |
4059 | free_ftrace_hash_rcu(old_hash); | 4093 | free_ftrace_hash_rcu(old_hash); |
4060 | } | 4094 | } |
4061 | mutex_unlock(&ftrace_lock); | 4095 | mutex_unlock(&ftrace_lock); |
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void) | |||
4267 | int ftrace_regex_release(struct inode *inode, struct file *file) | 4301 | int ftrace_regex_release(struct inode *inode, struct file *file) |
4268 | { | 4302 | { |
4269 | struct seq_file *m = (struct seq_file *)file->private_data; | 4303 | struct seq_file *m = (struct seq_file *)file->private_data; |
4304 | struct ftrace_ops_hash old_hash_ops; | ||
4270 | struct ftrace_iterator *iter; | 4305 | struct ftrace_iterator *iter; |
4271 | struct ftrace_hash **orig_hash; | 4306 | struct ftrace_hash **orig_hash; |
4272 | struct ftrace_hash *old_hash; | 4307 | struct ftrace_hash *old_hash; |
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
4300 | 4335 | ||
4301 | mutex_lock(&ftrace_lock); | 4336 | mutex_lock(&ftrace_lock); |
4302 | old_hash = *orig_hash; | 4337 | old_hash = *orig_hash; |
4338 | old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash; | ||
4339 | old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash; | ||
4303 | ret = ftrace_hash_move(iter->ops, filter_hash, | 4340 | ret = ftrace_hash_move(iter->ops, filter_hash, |
4304 | orig_hash, iter->hash); | 4341 | orig_hash, iter->hash); |
4305 | if (!ret) { | 4342 | if (!ret) { |
4306 | ftrace_ops_update_code(iter->ops, old_hash); | 4343 | ftrace_ops_update_code(iter->ops, &old_hash_ops); |
4307 | free_ftrace_hash_rcu(old_hash); | 4344 | free_ftrace_hash_rcu(old_hash); |
4308 | } | 4345 | } |
4309 | mutex_unlock(&ftrace_lock); | 4346 | mutex_unlock(&ftrace_lock); |
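The thread running through all of the ftrace.c hunks is the switch from passing a single struct ftrace_hash to passing a struct ftrace_ops_hash, so the code that patches call sites sees the old filter hash and the old notrace hash as one consistent pair. Every caller follows the same snapshot-then-move shape; a sketch assembled from the ftrace_set_hash() hunk above:

    struct ftrace_ops_hash old_hash_ops;

    /* Snapshot both halves before the move replaces one of them ... */
    old_hash_ops.filter_hash  = ops->func_hash->filter_hash;
    old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;

    ret = ftrace_hash_move(ops, enable, orig_hash, hash);
    if (!ret) {
            /* ... so the code update still sees the consistent old pair. */
            ftrace_ops_update_code(ops, &old_hash_ops);
            free_ftrace_hash_rcu(old_hash);
    }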
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2e767972e99c..4a9079b9f082 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -6918,7 +6918,6 @@ void __init trace_init(void) | |||
6918 | tracepoint_printk = 0; | 6918 | tracepoint_printk = 0; |
6919 | } | 6919 | } |
6920 | tracer_alloc_buffers(); | 6920 | tracer_alloc_buffers(); |
6921 | init_ftrace_syscalls(); | ||
6922 | trace_event_init(); | 6921 | trace_event_init(); |
6923 | } | 6922 | } |
6924 | 6923 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 366a78a3e61e..b03a0ea77b99 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void) | |||
2429 | return 0; | 2429 | return 0; |
2430 | } | 2430 | } |
2431 | 2431 | ||
2432 | static __init void | ||
2433 | early_enable_events(struct trace_array *tr, bool disable_first) | ||
2434 | { | ||
2435 | char *buf = bootup_event_buf; | ||
2436 | char *token; | ||
2437 | int ret; | ||
2438 | |||
2439 | while (true) { | ||
2440 | token = strsep(&buf, ","); | ||
2441 | |||
2442 | if (!token) | ||
2443 | break; | ||
2444 | if (!*token) | ||
2445 | continue; | ||
2446 | |||
2447 | /* Restarting syscalls requires that we stop them first */ | ||
2448 | if (disable_first) | ||
2449 | ftrace_set_clr_event(tr, token, 0); | ||
2450 | |||
2451 | ret = ftrace_set_clr_event(tr, token, 1); | ||
2452 | if (ret) | ||
2453 | pr_warn("Failed to enable trace event: %s\n", token); | ||
2454 | |||
2455 | /* Put back the comma to allow this to be called again */ | ||
2456 | if (buf) | ||
2457 | *(buf - 1) = ','; | ||
2458 | } | ||
2459 | } | ||
2460 | |||
2432 | static __init int event_trace_enable(void) | 2461 | static __init int event_trace_enable(void) |
2433 | { | 2462 | { |
2434 | struct trace_array *tr = top_trace_array(); | 2463 | struct trace_array *tr = top_trace_array(); |
2435 | struct ftrace_event_call **iter, *call; | 2464 | struct ftrace_event_call **iter, *call; |
2436 | char *buf = bootup_event_buf; | ||
2437 | char *token; | ||
2438 | int ret; | 2465 | int ret; |
2439 | 2466 | ||
2440 | if (!tr) | 2467 | if (!tr) |
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void) | |||
2456 | */ | 2483 | */ |
2457 | __trace_early_add_events(tr); | 2484 | __trace_early_add_events(tr); |
2458 | 2485 | ||
2459 | while (true) { | 2486 | early_enable_events(tr, false); |
2460 | token = strsep(&buf, ","); | ||
2461 | |||
2462 | if (!token) | ||
2463 | break; | ||
2464 | if (!*token) | ||
2465 | continue; | ||
2466 | |||
2467 | ret = ftrace_set_clr_event(tr, token, 1); | ||
2468 | if (ret) | ||
2469 | pr_warn("Failed to enable trace event: %s\n", token); | ||
2470 | } | ||
2471 | 2487 | ||
2472 | trace_printk_start_comm(); | 2488 | trace_printk_start_comm(); |
2473 | 2489 | ||
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void) | |||
2478 | return 0; | 2494 | return 0; |
2479 | } | 2495 | } |
2480 | 2496 | ||
2497 | /* | ||
2498 | * event_trace_enable() is called from trace_event_init() first to | ||
2499 | * initialize events and perhaps start any events that are on the | ||
2500 | * command line. Unfortunately, there are some events that will not | ||
2501 | * start this early, like the system call tracepoints that need | ||
2502 | * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable() | ||
2503 | * is called before pid 1 starts, and this flag is never set, making | ||
2504 | * the syscall tracepoint never get reached, but the event is enabled | ||
2505 | * regardless (and not doing anything). | ||
2506 | */ | ||
2507 | static __init int event_trace_enable_again(void) | ||
2508 | { | ||
2509 | struct trace_array *tr; | ||
2510 | |||
2511 | tr = top_trace_array(); | ||
2512 | if (!tr) | ||
2513 | return -ENODEV; | ||
2514 | |||
2515 | early_enable_events(tr, true); | ||
2516 | |||
2517 | return 0; | ||
2518 | } | ||
2519 | |||
2520 | early_initcall(event_trace_enable_again); | ||
2521 | |||
2481 | static __init int event_trace_init(void) | 2522 | static __init int event_trace_init(void) |
2482 | { | 2523 | { |
2483 | struct trace_array *tr; | 2524 | struct trace_array *tr; |
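early_enable_events() can now run twice (once from event_trace_enable() and again from the event_trace_enable_again() initcall) because it undoes strsep()'s destructive parsing: strsep() overwrites each comma with a NUL, and writing the comma back restores the buffer for the next pass. A standalone demonstration of the trick:

    #define _DEFAULT_SOURCE         /* for strsep() on glibc */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char list[] = "sched,irq,timer";
            int pass;

            for (pass = 0; pass < 2; pass++) {
                    char *buf = list;
                    char *token;

                    while ((token = strsep(&buf, ",")) != NULL) {
                            if (!*token)
                                    continue;
                            printf("pass %d: %s\n", pass, token);
                            /* put back the comma strsep() consumed so the
                             * same buffer can be parsed again next pass */
                            if (buf)
                                    *(buf - 1) = ',';
                    }
            }
            return 0;       /* both passes print all three tokens */
    }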
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index b0b1c44e923a..3ccf5c2c1320 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c | |||
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv) | |||
132 | 132 | ||
133 | static __init int kdb_ftrace_register(void) | 133 | static __init int kdb_ftrace_register(void) |
134 | { | 134 | { |
135 | kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", | 135 | kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", |
136 | "Dump ftrace log", 0, KDB_REPEAT_NONE); | 136 | "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); |
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6202b08f1933..beeeac9e0e3e 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool) | |||
1841 | * spin_lock_irq(pool->lock) which may be released and regrabbed | 1841 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
1842 | * multiple times. Does GFP_KERNEL allocations. Called only from | 1842 | * multiple times. Does GFP_KERNEL allocations. Called only from |
1843 | * manager. | 1843 | * manager. |
1844 | * | ||
1845 | * Return: | ||
1846 | * %false if no action was taken and pool->lock stayed locked, %true | ||
1847 | * otherwise. | ||
1848 | */ | 1844 | */ |
1849 | static bool maybe_create_worker(struct worker_pool *pool) | 1845 | static void maybe_create_worker(struct worker_pool *pool) |
1850 | __releases(&pool->lock) | 1846 | __releases(&pool->lock) |
1851 | __acquires(&pool->lock) | 1847 | __acquires(&pool->lock) |
1852 | { | 1848 | { |
1853 | if (!need_to_create_worker(pool)) | ||
1854 | return false; | ||
1855 | restart: | 1849 | restart: |
1856 | spin_unlock_irq(&pool->lock); | 1850 | spin_unlock_irq(&pool->lock); |
1857 | 1851 | ||
@@ -1877,7 +1871,6 @@ restart: | |||
1877 | */ | 1871 | */ |
1878 | if (need_to_create_worker(pool)) | 1872 | if (need_to_create_worker(pool)) |
1879 | goto restart; | 1873 | goto restart; |
1880 | return true; | ||
1881 | } | 1874 | } |
1882 | 1875 | ||
1883 | /** | 1876 | /** |
@@ -1897,16 +1890,14 @@ restart: | |||
1897 | * multiple times. Does GFP_KERNEL allocations. | 1890 | * multiple times. Does GFP_KERNEL allocations. |
1898 | * | 1891 | * |
1899 | * Return: | 1892 | * Return: |
1900 | * %false if the pool don't need management and the caller can safely start | 1893 | * %false if the pool doesn't need management and the caller can safely |
1901 | * processing works, %true indicates that the function released pool->lock | 1894 | * start processing works, %true if management function was performed and |
1902 | * and reacquired it to perform some management function and that the | 1895 | * the conditions that the caller verified before calling the function may |
1903 | * conditions that the caller verified while holding the lock before | 1896 | * no longer be true. |
1904 | * calling the function might no longer be true. | ||
1905 | */ | 1897 | */ |
1906 | static bool manage_workers(struct worker *worker) | 1898 | static bool manage_workers(struct worker *worker) |
1907 | { | 1899 | { |
1908 | struct worker_pool *pool = worker->pool; | 1900 | struct worker_pool *pool = worker->pool; |
1909 | bool ret = false; | ||
1910 | 1901 | ||
1911 | /* | 1902 | /* |
1912 | * Anyone who successfully grabs manager_arb wins the arbitration | 1903 | * Anyone who successfully grabs manager_arb wins the arbitration |
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker) | |||
1919 | * actual management, the pool may stall indefinitely. | 1910 | * actual management, the pool may stall indefinitely. |
1920 | */ | 1911 | */ |
1921 | if (!mutex_trylock(&pool->manager_arb)) | 1912 | if (!mutex_trylock(&pool->manager_arb)) |
1922 | return ret; | 1913 | return false; |
1923 | 1914 | ||
1924 | ret |= maybe_create_worker(pool); | 1915 | maybe_create_worker(pool); |
1925 | 1916 | ||
1926 | mutex_unlock(&pool->manager_arb); | 1917 | mutex_unlock(&pool->manager_arb); |
1927 | return ret; | 1918 | return true; |
1928 | } | 1919 | } |
1929 | 1920 | ||
1930 | /** | 1921 | /** |
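The workqueue.c hunks simplify a contract: maybe_create_worker() loops until the pool has an idle worker, so its boolean return carried no usable information, and manage_workers() now reports only whether it won manager_arb (and therefore may have released and reacquired pool->lock). An annotated sketch of the resulting shape, names as in the patch:

    static bool manage_workers(struct worker *worker)
    {
            struct worker_pool *pool = worker->pool;

            /* Lost the arbitration: someone else is managing the pool. */
            if (!mutex_trylock(&pool->manager_arb))
                    return false;

            /* May drop and retake pool->lock while creating workers. */
            maybe_create_worker(pool);

            mutex_unlock(&pool->manager_arb);

            /* True tells the caller to revalidate whatever it checked
             * under pool->lock before calling in. */
            return true;
    }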