author	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-22 15:34:08 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-22 15:34:08 -0400
commit	cd9b44f90763c3367e8dd0601849ffb028e8ba52 (patch)
tree	d1da0c2d49e3622fe51584a154ab383b3e783c1b /kernel
parent	df2def49c57b4146520a1f4ca37bc3f494e2cd67 (diff)
parent	2a9d6481004215da8e93edb588cf448f2af80303 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:

 - the rest of MM
 - procfs updates
 - various misc things
 - more y2038 fixes
 - get_maintainer updates
 - lib/ updates
 - checkpatch updates
 - various epoll updates
 - autofs updates
 - hfsplus
 - some reiserfs work
 - fatfs updates
 - signal.c cleanups
 - ipc/ updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (166 commits)
  ipc/util.c: update return value of ipc_getref from int to bool
  ipc/util.c: further variable name cleanups
  ipc: simplify ipc initialization
  ipc: get rid of ids->tables_initialized hack
  lib/rhashtable: guarantee initial hashtable allocation
  lib/rhashtable: simplify bucket_table_alloc()
  ipc: drop ipc_lock()
  ipc/util.c: correct comment in ipc_obtain_object_check
  ipc: rename ipcctl_pre_down_nolock()
  ipc/util.c: use ipc_rcu_putref() for failues in ipc_addid()
  ipc: reorganize initialization of kern_ipc_perm.seq
  ipc: compute kern_ipc_perm.id under the ipc lock
  init/Kconfig: remove EXPERT from CHECKPOINT_RESTORE
  fs/sysv/inode.c: use ktime_get_real_seconds() for superblock stamp
  adfs: use timespec64 for time conversion
  kernel/sysctl.c: fix typos in comments
  drivers/rapidio/devices/rio_mport_cdev.c: remove redundant pointer md
  fork: don't copy inconsistent signal handler state to child
  signal: make get_signal() return bool
  signal: make sigkill_pending() return bool
  ...
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/crash_core.c	8
-rw-r--r--	kernel/fork.c	6
-rw-r--r--	kernel/hung_task.c	15
-rw-r--r--	kernel/module.c	32
-rw-r--r--	kernel/printk/printk.c	16
-rw-r--r--	kernel/sched/wait.c	2
-rw-r--r--	kernel/signal.c	170
-rw-r--r--	kernel/sysctl.c	19
-rw-r--r--	kernel/tracepoint.c	49
-rw-r--r--	kernel/user.c	11
10 files changed, 188 insertions, 140 deletions
diff --git a/kernel/crash_core.c b/kernel/crash_core.c
index b66aced5e8c2..933cb3e45b98 100644
--- a/kernel/crash_core.c
+++ b/kernel/crash_core.c
@@ -14,8 +14,8 @@
 #include <asm/sections.h>
 
 /* vmcoreinfo stuff */
-static unsigned char *vmcoreinfo_data;
-static size_t vmcoreinfo_size;
+unsigned char *vmcoreinfo_data;
+size_t vmcoreinfo_size;
 u32 *vmcoreinfo_note;
 
 /* trusted vmcoreinfo, e.g. we can make a copy in the crash memory */
@@ -344,7 +344,7 @@ void crash_save_vmcoreinfo(void)
 	if (vmcoreinfo_data_safecopy)
 		vmcoreinfo_data = vmcoreinfo_data_safecopy;
 
-	vmcoreinfo_append_str("CRASHTIME=%ld\n", get_seconds());
+	vmcoreinfo_append_str("CRASHTIME=%lld\n", ktime_get_real_seconds());
 	update_vmcoreinfo_note();
 }
 
@@ -401,7 +401,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 	VMCOREINFO_SYMBOL(init_uts_ns);
 	VMCOREINFO_SYMBOL(node_online_map);
 #ifdef CONFIG_MMU
-	VMCOREINFO_SYMBOL(swapper_pg_dir);
+	VMCOREINFO_SYMBOL_ARRAY(swapper_pg_dir);
 #endif
 	VMCOREINFO_SYMBOL(_stext);
 	VMCOREINFO_SYMBOL(vmap_area_list);
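
The CRASHTIME hunk is one of the y2038 fixes called out in the merge message: get_seconds() returns an unsigned long, which is only 32 bits wide on 32-bit architectures and eventually overflows, while ktime_get_real_seconds() returns a 64-bit time64_t everywhere, hence %ld becoming %lld. A minimal userspace sketch of the format-string consequence (the two variables below merely stand in for the kernel helpers):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned long secs32 = 2147483648UL;	/* stand-in for get_seconds() */
		int64_t secs64 = 2147483648LL;		/* stand-in for ktime_get_real_seconds() */

		printf("CRASHTIME=%ld\n", (long)secs32);	/* old format: long-sized, arch-dependent */
		printf("CRASHTIME=%lld\n", (long long)secs64);	/* new format: always 64-bit */
		return 0;
	}
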
diff --git a/kernel/fork.c b/kernel/fork.c
index ff5037be7771..d896e9ca38b0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -310,8 +310,9 @@ static struct kmem_cache *mm_cachep;
 
 struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
 {
-	struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+	struct vm_area_struct *vma;
 
+	vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
 	if (vma)
 		vma_init(vma, mm);
 	return vma;
@@ -1301,6 +1302,7 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
 	tsk->nvcsw = tsk->nivcsw = 0;
 #ifdef CONFIG_DETECT_HUNG_TASK
 	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
+	tsk->last_switch_time = 0;
 #endif
 
 	tsk->mm = NULL;
@@ -1425,7 +1427,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
 		return -ENOMEM;
 
 	atomic_set(&sig->count, 1);
+	spin_lock_irq(&current->sighand->siglock);
 	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
+	spin_unlock_irq(&current->sighand->siglock);
 	return 0;
 }
 
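
The copy_sighand() hunk corresponds to "fork: don't copy inconsistent signal handler state to child" from the merge log: without the siglock, the memcpy() can race against a sibling thread of the parent updating a handler via sigaction() and copy a half-updated k_sigaction. A simplified sketch of the writer side it now excludes (modelled on do_sigaction(); the locking, not the exact body, is the point):

	/* writer side, simplified from do_sigaction() */
	struct k_sigaction *k = &current->sighand->action[sig - 1];

	spin_lock_irq(&current->sighand->siglock);
	*k = *act;	/* handler update now appears atomic to the fork path */
	spin_unlock_irq(&current->sighand->siglock);
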
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 32b479468e4d..b9132d1269ef 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -40,6 +40,11 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
  */
 unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;
 
+/*
+ * Zero (default value) means use sysctl_hung_task_timeout_secs:
+ */
+unsigned long __read_mostly sysctl_hung_task_check_interval_secs;
+
 int __read_mostly sysctl_hung_task_warnings = 10;
 
 static int __read_mostly did_panic;
@@ -98,8 +103,11 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
 
 	if (switch_count != t->last_switch_count) {
 		t->last_switch_count = switch_count;
+		t->last_switch_time = jiffies;
 		return;
 	}
+	if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
+		return;
 
 	trace_sched_process_hang(t);
 
@@ -245,8 +253,13 @@ static int watchdog(void *dummy)
 
 	for ( ; ; ) {
 		unsigned long timeout = sysctl_hung_task_timeout_secs;
-		long t = hung_timeout_jiffies(hung_last_checked, timeout);
+		unsigned long interval = sysctl_hung_task_check_interval_secs;
+		long t;
 
+		if (interval == 0)
+			interval = timeout;
+		interval = min_t(unsigned long, interval, timeout);
+		t = hung_timeout_jiffies(hung_last_checked, interval);
 		if (t <= 0) {
 			if (!atomic_xchg(&reset_hung_task, 0))
 				check_hung_uninterruptible_tasks(timeout);
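
Taken together, the three hunks decouple how often the watchdog wakes up from how long a task must be unresponsive before it is reported. A worked example with illustrative values: with hung_task_timeout_secs = 120 and the new hung_task_check_interval_secs = 10, the watchdog scans every 10 seconds, while the added last_switch_time test keeps a task from being flagged until a full 120 seconds have passed since it last scheduled; leaving the new knob at 0 preserves the old scan-once-per-timeout behaviour. In sketch form:

	unsigned long timeout  = 120;	/* sysctl_hung_task_timeout_secs */
	unsigned long interval = 10;	/* sysctl_hung_task_check_interval_secs */

	if (interval == 0)		/* 0 = follow the timeout, as before */
		interval = timeout;
	interval = min_t(unsigned long, interval, timeout);
	/* the watchdog sleeps "interval" seconds per scan, but check_hung_task()
	 * still requires jiffies >= last_switch_time + timeout * HZ */
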
diff --git a/kernel/module.c b/kernel/module.c
index b046a32520d8..6746c85511fe 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -529,12 +529,30 @@ static bool check_symbol(const struct symsearch *syms,
 	return true;
 }
 
+static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
+{
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+	return (unsigned long)offset_to_ptr(&sym->value_offset);
+#else
+	return sym->value;
+#endif
+}
+
+static const char *kernel_symbol_name(const struct kernel_symbol *sym)
+{
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+	return offset_to_ptr(&sym->name_offset);
+#else
+	return sym->name;
+#endif
+}
+
 static int cmp_name(const void *va, const void *vb)
 {
 	const char *a;
 	const struct kernel_symbol *b;
 	a = va; b = vb;
-	return strcmp(a, b->name);
+	return strcmp(a, kernel_symbol_name(b));
 }
 
 static bool find_symbol_in_section(const struct symsearch *syms,
@@ -2170,7 +2188,7 @@ void *__symbol_get(const char *symbol)
 		sym = NULL;
 	preempt_enable();
 
-	return sym ? (void *)sym->value : NULL;
+	return sym ? (void *)kernel_symbol_value(sym) : NULL;
 }
 EXPORT_SYMBOL_GPL(__symbol_get);
 
@@ -2200,10 +2218,12 @@ static int verify_export_symbols(struct module *mod)
 
 	for (i = 0; i < ARRAY_SIZE(arr); i++) {
 		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
-			if (find_symbol(s->name, &owner, NULL, true, false)) {
+			if (find_symbol(kernel_symbol_name(s), &owner, NULL,
+					true, false)) {
 				pr_err("%s: exports duplicate symbol %s"
 				       " (owned by %s)\n",
-				       mod->name, s->name, module_name(owner));
+				       mod->name, kernel_symbol_name(s),
+				       module_name(owner));
 				return -ENOEXEC;
 			}
 		}
@@ -2252,7 +2272,7 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
 			ksym = resolve_symbol_wait(mod, info, name);
 			/* Ok if resolved. */
 			if (ksym && !IS_ERR(ksym)) {
-				sym[i].st_value = ksym->value;
+				sym[i].st_value = kernel_symbol_value(ksym);
 				break;
 			}
 
@@ -2516,7 +2536,7 @@ static int is_exported(const char *name, unsigned long value,
 		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
 	else
 		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
-	return ks != NULL && ks->value == value;
+	return ks != NULL && kernel_symbol_value(ks) == value;
 }
 
 /* As per nm */
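
All five module.c hunks funnel symbol accesses through the two new accessors so that, on architectures selecting CONFIG_HAVE_ARCH_PREL32_RELOCATIONS, the exported-symbol tables can store 32-bit self-relative offsets instead of absolute pointers, halving the table size on 64-bit and avoiding load-time relocations. The offset_to_ptr() helper they rely on reduces to pointer-plus-self-relative-offset; a sketch of its shape, matching include/linux/compiler.h at the time:

	static inline void *offset_to_ptr(const int *off)
	{
		/* the entry stores (target - &entry), so adding the entry's
		 * own address back yields the target pointer */
		return (void *)((unsigned long)off + *off);
	}
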
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 90b6ab01db59..918f386b2f6e 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2788,7 +2788,8 @@ EXPORT_SYMBOL(unregister_console);
 void __init console_init(void)
 {
 	int ret;
-	initcall_t *call;
+	initcall_t call;
+	initcall_entry_t *ce;
 
 	/* Setup the default TTY line discipline. */
 	n_tty_init();
@@ -2797,13 +2798,14 @@ void __init console_init(void)
 	 * set up the console device so that later boot sequences can
 	 * inform about problems etc..
 	 */
-	call = __con_initcall_start;
+	ce = __con_initcall_start;
 	trace_initcall_level("console");
-	while (call < __con_initcall_end) {
-		trace_initcall_start((*call));
-		ret = (*call)();
-		trace_initcall_finish((*call), ret);
-		call++;
+	while (ce < __con_initcall_end) {
+		call = initcall_from_entry(ce);
+		trace_initcall_start(call);
+		ret = call();
+		trace_initcall_finish(call, ret);
+		ce++;
 	}
 }
 
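
console_init() gets the same PREL32 treatment: the console initcall section now holds initcall_entry_t entries that must be converted back to callable function pointers. Assuming the companion definitions from the same series in include/linux/init.h, initcall_from_entry() is roughly:

	#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
	typedef int initcall_entry_t;

	static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
	{
		return offset_to_ptr(entry);	/* 32-bit self-relative offset */
	}
	#else
	typedef initcall_t initcall_entry_t;

	static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
	{
		return *entry;			/* plain function pointer */
	}
	#endif
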
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 870f97b313e3..5dd47f1103d1 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -69,6 +69,8 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 	wait_queue_entry_t *curr, *next;
 	int cnt = 0;
 
+	lockdep_assert_held(&wq_head->lock);
+
 	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
 		curr = list_next_entry(bookmark, entry);
 
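
The assertion documents an existing contract rather than changing behaviour: every caller of __wake_up_common() is expected to hold wq_head->lock, and with CONFIG_LOCKDEP enabled a caller that does not will now warn. A sketch of the expected calling pattern, modelled on the __wake_up() path in the same file:

	/* sketch of a conforming caller, modelled on __wake_up_common_lock() */
	unsigned long flags;

	spin_lock_irqsave(&wq_head->lock, flags);
	__wake_up_common(wq_head, mode, nr_exclusive, 0, key, NULL);
	spin_unlock_irqrestore(&wq_head->lock, flags);
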
diff --git a/kernel/signal.c b/kernel/signal.c
index cfa9d10e731a..5843c541fda9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -65,14 +65,14 @@ static void __user *sig_handler(struct task_struct *t, int sig)
 	return t->sighand->action[sig - 1].sa.sa_handler;
 }
 
-static int sig_handler_ignored(void __user *handler, int sig)
+static inline bool sig_handler_ignored(void __user *handler, int sig)
 {
 	/* Is it explicitly or implicitly ignored? */
 	return handler == SIG_IGN ||
 	       (handler == SIG_DFL && sig_kernel_ignore(sig));
 }
 
-static int sig_task_ignored(struct task_struct *t, int sig, bool force)
+static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
 {
 	void __user *handler;
 
@@ -80,12 +80,12 @@ static int sig_task_ignored(struct task_struct *t, int sig, bool force)
 
 	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
 	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
-		return 1;
+		return true;
 
 	return sig_handler_ignored(handler, sig);
 }
 
-static int sig_ignored(struct task_struct *t, int sig, bool force)
+static bool sig_ignored(struct task_struct *t, int sig, bool force)
 {
 	/*
 	 * Blocked signals are never ignored, since the
@@ -93,7 +93,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
 	 * unblocked.
 	 */
 	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
-		return 0;
+		return false;
 
 	/*
 	 * Tracers may want to know about even ignored signal unless it
@@ -101,7 +101,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
 	 * by SIGNAL_UNKILLABLE task.
 	 */
 	if (t->ptrace && sig != SIGKILL)
-		return 0;
+		return false;
 
 	return sig_task_ignored(t, sig, force);
 }
@@ -110,7 +110,7 @@ static int sig_ignored(struct task_struct *t, int sig, bool force)
  * Re-calculate pending state from the set of locally pending
  * signals, globally pending signals, and blocked signals.
  */
-static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
+static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
 {
 	unsigned long ready;
 	long i;
@@ -138,20 +138,21 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
 
 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
 
-static int recalc_sigpending_tsk(struct task_struct *t)
+static bool recalc_sigpending_tsk(struct task_struct *t)
 {
 	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
 	    PENDING(&t->pending, &t->blocked) ||
 	    PENDING(&t->signal->shared_pending, &t->blocked)) {
 		set_tsk_thread_flag(t, TIF_SIGPENDING);
-		return 1;
+		return true;
 	}
+
 	/*
 	 * We must never clear the flag in another thread, or in current
 	 * when it's possible the current syscall is returning -ERESTART*.
 	 * So we don't clear it here, and only callers who know they should do.
 	 */
-	return 0;
+	return false;
 }
 
 /*
@@ -529,13 +530,15 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 	}
 }
 
-int unhandled_signal(struct task_struct *tsk, int sig)
+bool unhandled_signal(struct task_struct *tsk, int sig)
 {
 	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
 	if (is_global_init(tsk))
-		return 1;
+		return true;
+
 	if (handler != SIG_IGN && handler != SIG_DFL)
-		return 0;
+		return false;
+
 	/* if ptraced, let the tracer determine */
 	return !tsk->ptrace;
 }
@@ -709,14 +712,14 @@ void signal_wake_up_state(struct task_struct *t, unsigned int state)
  *
  * All callers must be holding the siglock.
  */
-static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
+static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 {
 	struct sigqueue *q, *n;
 	sigset_t m;
 
 	sigandsets(&m, mask, &s->signal);
 	if (sigisemptyset(&m))
-		return 0;
+		return;
 
 	sigandnsets(&s->signal, &s->signal, mask);
 	list_for_each_entry_safe(q, n, &s->list, list) {
@@ -725,7 +728,6 @@ static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
 			__sigqueue_free(q);
 		}
 	}
-	return 1;
 }
 
 static inline int is_si_special(const struct siginfo *info)
@@ -742,21 +744,16 @@ static inline bool si_fromuser(const struct siginfo *info)
 /*
  * called with RCU read lock from check_kill_permission()
  */
-static int kill_ok_by_cred(struct task_struct *t)
+static bool kill_ok_by_cred(struct task_struct *t)
 {
 	const struct cred *cred = current_cred();
 	const struct cred *tcred = __task_cred(t);
 
-	if (uid_eq(cred->euid, tcred->suid) ||
-	    uid_eq(cred->euid, tcred->uid) ||
-	    uid_eq(cred->uid, tcred->suid) ||
-	    uid_eq(cred->uid, tcred->uid))
-		return 1;
-
-	if (ns_capable(tcred->user_ns, CAP_KILL))
-		return 1;
-
-	return 0;
+	return uid_eq(cred->euid, tcred->suid) ||
+	       uid_eq(cred->euid, tcred->uid) ||
+	       uid_eq(cred->uid, tcred->suid) ||
+	       uid_eq(cred->uid, tcred->uid) ||
+	       ns_capable(tcred->user_ns, CAP_KILL);
 }
 
 /*
@@ -907,16 +904,20 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
  * as soon as they're available, so putting the signal on the shared queue
  * will be equivalent to sending it to one such thread.
  */
-static inline int wants_signal(int sig, struct task_struct *p)
+static inline bool wants_signal(int sig, struct task_struct *p)
 {
 	if (sigismember(&p->blocked, sig))
-		return 0;
+		return false;
+
 	if (p->flags & PF_EXITING)
-		return 0;
+		return false;
+
 	if (sig == SIGKILL)
-		return 1;
+		return true;
+
 	if (task_is_stopped_or_traced(p))
-		return 0;
+		return false;
+
 	return task_curr(p) || !signal_pending(p);
 }
 
@@ -996,7 +997,7 @@ static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
 	return;
 }
 
-static inline int legacy_queue(struct sigpending *signals, int sig)
+static inline bool legacy_queue(struct sigpending *signals, int sig)
 {
 	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
@@ -1380,14 +1381,15 @@ static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 	return error;
 }
 
-static int kill_as_cred_perm(const struct cred *cred,
-			     struct task_struct *target)
+static inline bool kill_as_cred_perm(const struct cred *cred,
+				     struct task_struct *target)
 {
 	const struct cred *pcred = __task_cred(target);
-	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
-	    !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
-		return 0;
-	return 1;
+
+	return uid_eq(cred->euid, pcred->suid) ||
+	       uid_eq(cred->euid, pcred->uid) ||
+	       uid_eq(cred->uid, pcred->suid) ||
+	       uid_eq(cred->uid, pcred->uid);
 }
 
 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
@@ -1500,8 +1502,7 @@ send_sig(int sig, struct task_struct *p, int priv)
 	return send_sig_info(sig, __si_special(priv), p);
 }
 
-void
-force_sig(int sig, struct task_struct *p)
+void force_sig(int sig, struct task_struct *p)
 {
 	force_sig_info(sig, SEND_SIG_PRIV, p);
 }
@@ -1512,8 +1513,7 @@ force_sig(int sig, struct task_struct *p)
  * the problem was already a SIGSEGV, we'll want to
  * make sure we don't even try to deliver the signal..
  */
-int
-force_sigsegv(int sig, struct task_struct *p)
+void force_sigsegv(int sig, struct task_struct *p)
 {
 	if (sig == SIGSEGV) {
 		unsigned long flags;
@@ -1522,7 +1522,6 @@ force_sigsegv(int sig, struct task_struct *p)
 		spin_unlock_irqrestore(&p->sighand->siglock, flags);
 	}
 	force_sig(SIGSEGV, p);
-	return 0;
 }
 
 int force_sig_fault(int sig, int code, void __user *addr
@@ -1923,10 +1922,10 @@ static void do_notify_parent_cldstop(struct task_struct *tsk,
 	spin_unlock_irqrestore(&sighand->siglock, flags);
 }
 
-static inline int may_ptrace_stop(void)
+static inline bool may_ptrace_stop(void)
 {
 	if (!likely(current->ptrace))
-		return 0;
+		return false;
 	/*
 	 * Are we in the middle of do_coredump?
 	 * If so and our tracer is also part of the coredump stopping
@@ -1942,19 +1941,19 @@ static inline int may_ptrace_stop(void)
 	 */
 	if (unlikely(current->mm->core_state) &&
 	    unlikely(current->mm == current->parent->mm))
-		return 0;
+		return false;
 
-	return 1;
+	return true;
 }
 
 /*
  * Return non-zero if there is a SIGKILL that should be waking us up.
  * Called with the siglock held.
  */
-static int sigkill_pending(struct task_struct *tsk)
+static bool sigkill_pending(struct task_struct *tsk)
 {
 	return sigismember(&tsk->pending.signal, SIGKILL) ||
 	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
 }
 
 /*
@@ -2334,7 +2333,7 @@ static int ptrace_signal(int signr, siginfo_t *info)
 		return signr;
 }
 
-int get_signal(struct ksignal *ksig)
+bool get_signal(struct ksignal *ksig)
 {
 	struct sighand_struct *sighand = current->sighand;
 	struct signal_struct *signal = current->signal;
@@ -2344,7 +2343,7 @@ int get_signal(struct ksignal *ksig)
 	task_work_run();
 
 	if (unlikely(uprobe_deny_signal()))
-		return 0;
+		return false;
 
 	/*
 	 * Do this once, we can't return to user-mode if freezing() == T.
@@ -2801,7 +2800,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
 }
 #endif
 
-static int do_sigpending(sigset_t *set)
+static void do_sigpending(sigset_t *set)
 {
 	spin_lock_irq(&current->sighand->siglock);
 	sigorsets(set, &current->pending.signal,
@@ -2810,7 +2809,6 @@ static int do_sigpending(sigset_t *set)
 
 	/* Outside the lock because only this thread touches it. */
 	sigandsets(set, &current->blocked, set);
-	return 0;
 }
 
 /**
@@ -2822,15 +2820,16 @@ static int do_sigpending(sigset_t *set)
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
 {
 	sigset_t set;
-	int err;
 
 	if (sigsetsize > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err && copy_to_user(uset, &set, sigsetsize))
-		err = -EFAULT;
-	return err;
+	do_sigpending(&set);
+
+	if (copy_to_user(uset, &set, sigsetsize))
+		return -EFAULT;
+
+	return 0;
 }
 
 #ifdef CONFIG_COMPAT
@@ -2838,15 +2837,13 @@ COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
 		compat_size_t, sigsetsize)
 {
 	sigset_t set;
-	int err;
 
 	if (sigsetsize > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err)
-		err = put_compat_sigset(uset, &set, sigsetsize);
-	return err;
+	do_sigpending(&set);
+
+	return put_compat_sigset(uset, &set, sigsetsize);
 }
 #endif
 
@@ -3608,25 +3605,26 @@ int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
 {
 	sigset_t set;
-	int err;
 
 	if (sizeof(old_sigset_t) > sizeof(*uset))
 		return -EINVAL;
 
-	err = do_sigpending(&set);
-	if (!err && copy_to_user(uset, &set, sizeof(old_sigset_t)))
-		err = -EFAULT;
-	return err;
+	do_sigpending(&set);
+
+	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
+		return -EFAULT;
+
+	return 0;
 }
 
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
 {
 	sigset_t set;
-	int err = do_sigpending(&set);
-	if (!err)
-		err = put_user(set.sig[0], set32);
-	return err;
+
+	do_sigpending(&set);
+
+	return put_user(set.sig[0], set32);
 }
 #endif
 
@@ -3697,25 +3695,23 @@ SYSCALL_DEFINE4(rt_sigaction, int, sig,
 		size_t, sigsetsize)
 {
 	struct k_sigaction new_sa, old_sa;
-	int ret = -EINVAL;
+	int ret;
 
 	/* XXX: Don't preclude handling different sized sigset_t's. */
 	if (sigsetsize != sizeof(sigset_t))
-		goto out;
+		return -EINVAL;
 
-	if (act) {
-		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
-			return -EFAULT;
-	}
+	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
+		return -EFAULT;
 
 	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
+	if (ret)
+		return ret;
 
-	if (!ret && oact) {
-		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
-			return -EFAULT;
-	}
-out:
-	return ret;
+	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
+		return -EFAULT;
+
+	return 0;
 }
 #ifdef CONFIG_COMPAT
 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
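
The bulk of the signal.c changes are the int-to-bool conversions advertised in the merge log ("signal: make get_signal() return bool", etc.); callers' logic is unchanged because every converted function was already used in a boolean context. For get_signal() that context is the per-architecture signal-delivery path; a simplified sketch of the caller pattern (function names as in typical arch code, not defined in this diff):

	static void do_signal(struct pt_regs *regs)
	{
		struct ksignal ksig;

		if (get_signal(&ksig)) {	/* true: a handler must run */
			handle_signal(&ksig, regs);
			return;
		}

		/* no handler: restart syscalls / restore the saved sigmask */
		restore_saved_sigmask();
	}
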
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f22f76b7a138..71ceb6c13c1a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -145,7 +145,10 @@ static int minolduid;
 static int ngroups_max = NGROUPS_MAX;
 static const int cap_last_cap = CAP_LAST_CAP;
 
-/*this is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs */
+/*
+ * This is needed for proc_doulongvec_minmax of sysctl_hung_task_timeout_secs
+ * and hung_task_check_interval_secs
+ */
 #ifdef CONFIG_DETECT_HUNG_TASK
 static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
 #endif
@@ -222,7 +225,7 @@ static int proc_dopipe_max_size(struct ctl_table *table, int write,
 		void __user *buffer, size_t *lenp, loff_t *ppos);
 
 #ifdef CONFIG_MAGIC_SYSRQ
-/* Note: sysrq code uses it's own private copy */
+/* Note: sysrq code uses its own private copy */
 static int __sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
 
 static int sysrq_sysctl_handler(struct ctl_table *table, int write,
@@ -1091,6 +1094,14 @@ static struct ctl_table kern_table[] = {
 		.extra2		= &hung_task_timeout_max,
 	},
 	{
+		.procname	= "hung_task_check_interval_secs",
+		.data		= &sysctl_hung_task_check_interval_secs,
+		.maxlen		= sizeof(unsigned long),
+		.mode		= 0644,
+		.proc_handler	= proc_dohung_task_timeout_secs,
+		.extra2		= &hung_task_timeout_max,
+	},
+	{
 		.procname	= "hung_task_warnings",
 		.data		= &sysctl_hung_task_warnings,
 		.maxlen		= sizeof(int),
@@ -1965,13 +1976,13 @@ static void warn_sysctl_write(struct ctl_table *table)
 }
 
 /**
- * proc_first_pos_non_zero_ignore - check if firs position is allowed
+ * proc_first_pos_non_zero_ignore - check if first position is allowed
  * @ppos: file position
  * @table: the sysctl table
  *
  * Returns true if the first position is non-zero and the sysctl_writes_strict
  * mode indicates this is not allowed for numeric input types. String proc
- * hadlers can ignore the return value.
+ * handlers can ignore the return value.
  */
 static bool proc_first_pos_non_zero_ignore(loff_t *ppos,
 					   struct ctl_table *table)
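
The new entry appears as /proc/sys/kernel/hung_task_check_interval_secs and reuses the timeout's handler and upper bound. A hypothetical userspace probe (path derived from the .procname above):

	#include <stdio.h>

	int main(void)
	{
		unsigned long interval;
		FILE *f = fopen("/proc/sys/kernel/hung_task_check_interval_secs", "r");

		if (f && fscanf(f, "%lu", &interval) == 1)
			printf("check interval: %lu s (0 = follow hung_task_timeout_secs)\n",
			       interval);
		if (f)
			fclose(f);
		return 0;
	}
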
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 96db841bf0fc..bf2c06ef9afc 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -371,6 +371,27 @@ int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data)
 }
 EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
 
+static void for_each_tracepoint_range(struct tracepoint * const *begin,
+		struct tracepoint * const *end,
+		void (*fct)(struct tracepoint *tp, void *priv),
+		void *priv)
+{
+	if (!begin)
+		return;
+
+	if (IS_ENABLED(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)) {
+		const int *iter;
+
+		for (iter = (const int *)begin; iter < (const int *)end; iter++)
+			fct(offset_to_ptr(iter), priv);
+	} else {
+		struct tracepoint * const *iter;
+
+		for (iter = begin; iter < end; iter++)
+			fct(*iter, priv);
+	}
+}
+
 #ifdef CONFIG_MODULES
 bool trace_module_has_bad_taint(struct module *mod)
 {
@@ -435,15 +456,9 @@ EXPORT_SYMBOL_GPL(unregister_tracepoint_module_notifier);
  * Ensure the tracer unregistered the module's probes before the module
  * teardown is performed. Prevents leaks of probe and data pointers.
  */
-static void tp_module_going_check_quiescent(struct tracepoint * const *begin,
-		struct tracepoint * const *end)
+static void tp_module_going_check_quiescent(struct tracepoint *tp, void *priv)
 {
-	struct tracepoint * const *iter;
-
-	if (!begin)
-		return;
-	for (iter = begin; iter < end; iter++)
-		WARN_ON_ONCE((*iter)->funcs);
+	WARN_ON_ONCE(tp->funcs);
 }
 
 static int tracepoint_module_coming(struct module *mod)
@@ -494,8 +509,9 @@ static void tracepoint_module_going(struct module *mod)
 			 * Called the going notifier before checking for
 			 * quiescence.
 			 */
-			tp_module_going_check_quiescent(mod->tracepoints_ptrs,
-				mod->tracepoints_ptrs + mod->num_tracepoints);
+			for_each_tracepoint_range(mod->tracepoints_ptrs,
+				mod->tracepoints_ptrs + mod->num_tracepoints,
+				tp_module_going_check_quiescent, NULL);
 			break;
 		}
 	}
@@ -547,19 +563,6 @@ static __init int init_tracepoints(void)
 __initcall(init_tracepoints);
 #endif /* CONFIG_MODULES */
 
-static void for_each_tracepoint_range(struct tracepoint * const *begin,
-		struct tracepoint * const *end,
-		void (*fct)(struct tracepoint *tp, void *priv),
-		void *priv)
-{
-	struct tracepoint * const *iter;
-
-	if (!begin)
-		return;
-	for (iter = begin; iter < end; iter++)
-		fct(*iter, priv);
-}
-
 /**
  * for_each_kernel_tracepoint - iteration on all kernel tracepoints
  * @fct: callback
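
Moving for_each_tracepoint_range() above the CONFIG_MODULES block and teaching it about PREL32 entries lets the module notifier and the public iterator share one walker. The exported entry point keeps its signature, so existing users are untouched; a small sketch of a caller (count_tp is illustrative, not from this diff):

	static void count_tp(struct tracepoint *tp, void *priv)
	{
		(*(int *)priv)++;	/* count every core-kernel tracepoint */
	}

	static int __init count_tracepoints(void)
	{
		int n = 0;

		for_each_kernel_tracepoint(count_tp, &n);
		pr_info("%d kernel tracepoints\n", n);
		return 0;
	}
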
diff --git a/kernel/user.c b/kernel/user.c
index 36288d840675..0df9b1640b2a 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -96,7 +96,7 @@ static DEFINE_SPINLOCK(uidhash_lock);
 
 /* root_user.__count is 1, for init task cred */
 struct user_struct root_user = {
-	.__count	= ATOMIC_INIT(1),
+	.__count	= REFCOUNT_INIT(1),
 	.processes	= ATOMIC_INIT(1),
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
@@ -123,7 +123,7 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 
 	hlist_for_each_entry(user, hashent, uidhash_node) {
 		if (uid_eq(user->uid, uid)) {
-			atomic_inc(&user->__count);
+			refcount_inc(&user->__count);
 			return user;
 		}
 	}
@@ -169,11 +169,8 @@ void free_uid(struct user_struct *up)
 	if (!up)
 		return;
 
-	local_irq_save(flags);
-	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
 		free_user(up, flags);
-	else
-		local_irq_restore(flags);
 }
 
 struct user_struct *alloc_uid(kuid_t uid)
@@ -191,7 +188,7 @@ struct user_struct *alloc_uid(kuid_t uid)
 		goto out_unlock;
 
 	new->uid = uid;
-	atomic_set(&new->__count, 1);
+	refcount_set(&new->__count, 1);
 	ratelimit_state_init(&new->ratelimit, HZ, 100);
 	ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
 
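
The atomic_t-to-refcount_t conversion gives __count saturation and use-after-free protection for free. The one subtle call is refcount_dec_and_lock_irqsave(): assuming it follows the contract of the other *_dec_and_lock helpers, it takes uidhash_lock with interrupts disabled only when the count actually reaches zero, which is why the old local_irq_save()/local_irq_restore() pair around the non-final decrement can go away. In sketch form:

	/* sketch of the helper's contract, not its implementation */
	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags)) {
		/* count hit zero: uidhash_lock held, irqs off, flags saved */
		free_user(up, flags);	/* drops the lock and restores flags */
	}
	/* otherwise: nothing held, nothing to restore */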