Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c     2
-rw-r--r--  kernel/nsproxy.c    3
-rw-r--r--  kernel/smp.c      183
-rw-r--r--  kernel/sys.c      288
-rw-r--r--  kernel/time.c       4
5 files changed, 178 insertions(+), 302 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 36700e9e2be9..f4bddb900186 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -593,7 +593,7 @@ COMPAT_SYSCALL_DEFINE5(waitid,
 		else
 			ret = put_compat_rusage(&ru, uru);
 		if (ret)
-			return ret;
+			return -EFAULT;
 	}
 
 	BUG_ON(info.si_code & __SI_MASK);
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 78e2ecb20165..b781e66a8f2c 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -153,8 +153,7 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
 		goto out;
 	}
 
-	new_ns = create_new_namespaces(flags, tsk,
-			task_cred_xxx(tsk, user_ns), tsk->fs);
+	new_ns = create_new_namespaces(flags, tsk, user_ns, tsk->fs);
 	if (IS_ERR(new_ns)) {
 		err = PTR_ERR(new_ns);
 		goto out;
diff --git a/kernel/smp.c b/kernel/smp.c
index 69f38bd98b42..8e451f3ff51b 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -16,22 +16,12 @@
 #include "smpboot.h"
 
 #ifdef CONFIG_USE_GENERIC_SMP_HELPERS
-static struct {
-	struct list_head	queue;
-	raw_spinlock_t		lock;
-} call_function __cacheline_aligned_in_smp =
-	{
-		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
-	};
-
 enum {
 	CSD_FLAG_LOCK		= 0x01,
 };
 
 struct call_function_data {
-	struct call_single_data	csd;
-	atomic_t		refs;
+	struct call_single_data	__percpu *csd;
 	cpumask_var_t		cpumask;
 	cpumask_var_t		cpumask_ipi;
 };
@@ -60,6 +50,11 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
+		cfd->csd = alloc_percpu(struct call_single_data);
+		if (!cfd->csd) {
+			free_cpumask_var(cfd->cpumask);
+			return notifier_from_errno(-ENOMEM);
+		}
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -70,6 +65,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
 		free_cpumask_var(cfd->cpumask_ipi);
+		free_percpu(cfd->csd);
 		break;
 #endif
 	};
@@ -171,85 +167,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 }
 
 /*
- * Invoked by arch to handle an IPI for call function. Must be called with
- * interrupts disabled.
- */
-void generic_smp_call_function_interrupt(void)
-{
-	struct call_function_data *data;
-	int cpu = smp_processor_id();
-
-	/*
-	 * Shouldn't receive this interrupt on a cpu that is not yet online.
-	 */
-	WARN_ON_ONCE(!cpu_online(cpu));
-
-	/*
-	 * Ensure entry is visible on call_function_queue after we have
-	 * entered the IPI. See comment in smp_call_function_many.
-	 * If we don't have this, then we may miss an entry on the list
-	 * and never get another IPI to process it.
-	 */
-	smp_mb();
-
-	/*
-	 * It's ok to use list_for_each_rcu() here even though we may
-	 * delete 'pos', since list_del_rcu() doesn't clear ->next
-	 */
-	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
-		int refs;
-		smp_call_func_t func;
-
-		/*
-		 * Since we walk the list without any locks, we might
-		 * see an entry that was completed, removed from the
-		 * list and is in the process of being reused.
-		 *
-		 * We must check that the cpu is in the cpumask before
-		 * checking the refs, and both must be set before
-		 * executing the callback on this cpu.
-		 */
-
-		if (!cpumask_test_cpu(cpu, data->cpumask))
-			continue;
-
-		smp_rmb();
-
-		if (atomic_read(&data->refs) == 0)
-			continue;
-
-		func = data->csd.func;		/* save for later warn */
-		func(data->csd.info);
-
-		/*
-		 * If the cpu mask is not still set then func enabled
-		 * interrupts (BUG), and this cpu took another smp call
-		 * function interrupt and executed func(info) twice
-		 * on this cpu.  That nested execution decremented refs.
-		 */
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
-			WARN(1, "%pf enabled interrupts and double executed\n", func);
-			continue;
-		}
-
-		refs = atomic_dec_return(&data->refs);
-		WARN_ON(refs < 0);
-
-		if (refs)
-			continue;
-
-		WARN_ON(!cpumask_empty(data->cpumask));
-
-		raw_spin_lock(&call_function.lock);
-		list_del_rcu(&data->csd.list);
-		raw_spin_unlock(&call_function.lock);
-
-		csd_unlock(&data->csd);
-	}
-
-}
-
-/*
  * Invoked by arch to handle an IPI for call function single. Must be
  * called from the arch with interrupts disabled.
  */
@@ -453,8 +370,7 @@ void smp_call_function_many(const struct cpumask *mask,
 			    smp_call_func_t func, void *info, bool wait)
 {
 	struct call_function_data *data;
-	unsigned long flags;
-	int refs, cpu, next_cpu, this_cpu = smp_processor_id();
+	int cpu, next_cpu, this_cpu = smp_processor_id();
 
 	/*
 	 * Can deadlock when called with interrupts disabled.
@@ -486,50 +402,13 @@ void smp_call_function_many(const struct cpumask *mask,
 	}
 
 	data = &__get_cpu_var(cfd_data);
-	csd_lock(&data->csd);
-
-	/* This BUG_ON verifies our reuse assertions and can be removed */
-	BUG_ON(atomic_read(&data->refs) || !cpumask_empty(data->cpumask));
-
-	/*
-	 * The global call function queue list add and delete are protected
-	 * by a lock, but the list is traversed without any lock, relying
-	 * on the rcu list add and delete to allow safe concurrent traversal.
-	 * We reuse the call function data without waiting for any grace
-	 * period after some other cpu removes it from the global queue.
-	 * This means a cpu might find our data block as it is being
-	 * filled out.
-	 *
-	 * We hold off the interrupt handler on the other cpu by
-	 * ordering our writes to the cpu mask vs our setting of the
-	 * refs counter.  We assert only the cpu owning the data block
-	 * will set a bit in cpumask, and each bit will only be cleared
-	 * by the subject cpu.  Each cpu must first find its bit is
-	 * set and then check that refs is set indicating the element is
-	 * ready to be processed, otherwise it must skip the entry.
-	 *
-	 * On the previous iteration refs was set to 0 by another cpu.
-	 * To avoid the use of transitivity, set the counter to 0 here
-	 * so the wmb will pair with the rmb in the interrupt handler.
-	 */
-	atomic_set(&data->refs, 0);	/* convert 3rd to 1st party write */
-
-	data->csd.func = func;
-	data->csd.info = info;
 
-	/* Ensure 0 refs is visible before mask.  Also orders func and info */
-	smp_wmb();
-
-	/* We rely on the "and" being processed before the store */
 	cpumask_and(data->cpumask, mask, cpu_online_mask);
 	cpumask_clear_cpu(this_cpu, data->cpumask);
-	refs = cpumask_weight(data->cpumask);
 
 	/* Some callers race with other cpus changing the passed mask */
-	if (unlikely(!refs)) {
-		csd_unlock(&data->csd);
+	if (unlikely(!cpumask_weight(data->cpumask)))
 		return;
-	}
 
 	/*
 	 * After we put an entry into the list, data->cpumask
@@ -537,34 +416,32 @@ void smp_call_function_many(const struct cpumask *mask,
 	 * a SMP function call, so data->cpumask will be zero.
 	 */
 	cpumask_copy(data->cpumask_ipi, data->cpumask);
-	raw_spin_lock_irqsave(&call_function.lock, flags);
-	/*
-	 * Place entry at the _HEAD_ of the list, so that any cpu still
-	 * observing the entry in generic_smp_call_function_interrupt()
-	 * will not miss any other list entries:
-	 */
-	list_add_rcu(&data->csd.list, &call_function.queue);
-	/*
-	 * We rely on the wmb() in list_add_rcu to complete our writes
-	 * to the cpumask before this write to refs, which indicates
-	 * data is on the list and is ready to be processed.
-	 */
-	atomic_set(&data->refs, refs);
-	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
-	/*
-	 * Make the list addition visible before sending the ipi.
-	 * (IPIs must obey or appear to obey normal Linux cache
-	 * coherency rules -- see comment in generic_exec_single).
-	 */
-	smp_mb();
+	for_each_cpu(cpu, data->cpumask) {
+		struct call_single_data *csd = per_cpu_ptr(data->csd, cpu);
+		struct call_single_queue *dst =
+					&per_cpu(call_single_queue, cpu);
+		unsigned long flags;
+
+		csd_lock(csd);
+		csd->func = func;
+		csd->info = info;
+
+		raw_spin_lock_irqsave(&dst->lock, flags);
+		list_add_tail(&csd->list, &dst->list);
+		raw_spin_unlock_irqrestore(&dst->lock, flags);
+	}
 
 	/* Send a message to all CPUs in the map */
 	arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
-	/* Optionally wait for the CPUs to complete */
-	if (wait)
-		csd_lock_wait(&data->csd);
+	if (wait) {
+		for_each_cpu(cpu, data->cpumask) {
+			struct call_single_data *csd =
+					per_cpu_ptr(data->csd, cpu);
+			csd_lock_wait(csd);
+		}
+	}
 }
 EXPORT_SYMBOL(smp_call_function_many);
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 265b37690421..840cfdad7bfc 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -47,6 +47,7 @@
 #include <linux/syscalls.h>
 #include <linux/kprobes.h>
 #include <linux/user_namespace.h>
+#include <linux/binfmts.h>
 
 #include <linux/kmsg_dump.h>
 /* Move somewhere else to avoid recompiling? */
@@ -2012,160 +2013,159 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 
 	error = 0;
 	switch (option) {
 	case PR_SET_PDEATHSIG:
 		if (!valid_signal(arg2)) {
 			error = -EINVAL;
-			break;
-		}
-		me->pdeath_signal = arg2;
-		break;
-	case PR_GET_PDEATHSIG:
-		error = put_user(me->pdeath_signal, (int __user *)arg2);
-		break;
-	case PR_GET_DUMPABLE:
-		error = get_dumpable(me->mm);
 			break;
-	case PR_SET_DUMPABLE:
-		if (arg2 < 0 || arg2 > 1) {
-			error = -EINVAL;
-			break;
-		}
-		set_dumpable(me->mm, arg2);
+		}
+		me->pdeath_signal = arg2;
+		break;
+	case PR_GET_PDEATHSIG:
+		error = put_user(me->pdeath_signal, (int __user *)arg2);
+		break;
+	case PR_GET_DUMPABLE:
+		error = get_dumpable(me->mm);
+		break;
+	case PR_SET_DUMPABLE:
+		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
+			error = -EINVAL;
 			break;
+		}
+		set_dumpable(me->mm, arg2);
+		break;
 
 	case PR_SET_UNALIGN:
 		error = SET_UNALIGN_CTL(me, arg2);
 		break;
 	case PR_GET_UNALIGN:
 		error = GET_UNALIGN_CTL(me, arg2);
 		break;
 	case PR_SET_FPEMU:
 		error = SET_FPEMU_CTL(me, arg2);
 		break;
 	case PR_GET_FPEMU:
 		error = GET_FPEMU_CTL(me, arg2);
 		break;
 	case PR_SET_FPEXC:
 		error = SET_FPEXC_CTL(me, arg2);
 		break;
 	case PR_GET_FPEXC:
 		error = GET_FPEXC_CTL(me, arg2);
 		break;
 	case PR_GET_TIMING:
 		error = PR_TIMING_STATISTICAL;
 		break;
 	case PR_SET_TIMING:
 		if (arg2 != PR_TIMING_STATISTICAL)
 			error = -EINVAL;
 		break;
 	case PR_SET_NAME:
-		comm[sizeof(me->comm)-1] = 0;
+		comm[sizeof(me->comm) - 1] = 0;
 		if (strncpy_from_user(comm, (char __user *)arg2,
 				      sizeof(me->comm) - 1) < 0)
 			return -EFAULT;
 		set_task_comm(me, comm);
 		proc_comm_connector(me);
 		break;
 	case PR_GET_NAME:
 		get_task_comm(comm, me);
-		if (copy_to_user((char __user *)arg2, comm,
-				 sizeof(comm)))
+		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
 			return -EFAULT;
 		break;
 	case PR_GET_ENDIAN:
 		error = GET_ENDIAN(me, arg2);
 		break;
 	case PR_SET_ENDIAN:
 		error = SET_ENDIAN(me, arg2);
 		break;
 	case PR_GET_SECCOMP:
 		error = prctl_get_seccomp();
 		break;
 	case PR_SET_SECCOMP:
 		error = prctl_set_seccomp(arg2, (char __user *)arg3);
 		break;
 	case PR_GET_TSC:
 		error = GET_TSC_CTL(arg2);
 		break;
 	case PR_SET_TSC:
 		error = SET_TSC_CTL(arg2);
 		break;
 	case PR_TASK_PERF_EVENTS_DISABLE:
 		error = perf_event_task_disable();
 		break;
 	case PR_TASK_PERF_EVENTS_ENABLE:
 		error = perf_event_task_enable();
 		break;
 	case PR_GET_TIMERSLACK:
 		error = current->timer_slack_ns;
 		break;
 	case PR_SET_TIMERSLACK:
 		if (arg2 <= 0)
 			current->timer_slack_ns =
 					current->default_timer_slack_ns;
 		else
 			current->timer_slack_ns = arg2;
 		break;
 	case PR_MCE_KILL:
 		if (arg4 | arg5)
 			return -EINVAL;
 		switch (arg2) {
 		case PR_MCE_KILL_CLEAR:
 			if (arg3 != 0)
-				return -EINVAL;
-			current->flags &= ~PF_MCE_PROCESS;
-			break;
-		case PR_MCE_KILL_SET:
-			current->flags |= PF_MCE_PROCESS;
-			if (arg3 == PR_MCE_KILL_EARLY)
-				current->flags |= PF_MCE_EARLY;
-			else if (arg3 == PR_MCE_KILL_LATE)
-				current->flags &= ~PF_MCE_EARLY;
-			else if (arg3 == PR_MCE_KILL_DEFAULT)
-				current->flags &=
-					~(PF_MCE_EARLY|PF_MCE_PROCESS);
-			else
-				return -EINVAL;
-			break;
-		default:
 				return -EINVAL;
-		}
-		break;
-	case PR_MCE_KILL_GET:
-		if (arg2 | arg3 | arg4 | arg5)
-			return -EINVAL;
-		if (current->flags & PF_MCE_PROCESS)
-			error = (current->flags & PF_MCE_EARLY) ?
-				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
+			current->flags &= ~PF_MCE_PROCESS;
+			break;
+		case PR_MCE_KILL_SET:
+			current->flags |= PF_MCE_PROCESS;
+			if (arg3 == PR_MCE_KILL_EARLY)
+				current->flags |= PF_MCE_EARLY;
+			else if (arg3 == PR_MCE_KILL_LATE)
+				current->flags &= ~PF_MCE_EARLY;
+			else if (arg3 == PR_MCE_KILL_DEFAULT)
+				current->flags &=
+					~(PF_MCE_EARLY|PF_MCE_PROCESS);
 			else
-			error = PR_MCE_KILL_DEFAULT;
-		break;
-	case PR_SET_MM:
-		error = prctl_set_mm(arg2, arg3, arg4, arg5);
-		break;
-	case PR_GET_TID_ADDRESS:
-		error = prctl_get_tid_address(me, (int __user **)arg2);
-		break;
-	case PR_SET_CHILD_SUBREAPER:
-		me->signal->is_child_subreaper = !!arg2;
-		break;
-	case PR_GET_CHILD_SUBREAPER:
-		error = put_user(me->signal->is_child_subreaper,
-				 (int __user *) arg2);
-		break;
-	case PR_SET_NO_NEW_PRIVS:
-		if (arg2 != 1 || arg3 || arg4 || arg5)
 				return -EINVAL;
-
-		current->no_new_privs = 1;
 			break;
-	case PR_GET_NO_NEW_PRIVS:
-		if (arg2 || arg3 || arg4 || arg5)
-			return -EINVAL;
-		return current->no_new_privs ? 1 : 0;
 		default:
-		error = -EINVAL;
-		break;
+			return -EINVAL;
+		}
+		break;
+	case PR_MCE_KILL_GET:
+		if (arg2 | arg3 | arg4 | arg5)
+			return -EINVAL;
+		if (current->flags & PF_MCE_PROCESS)
+			error = (current->flags & PF_MCE_EARLY) ?
+				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
+		else
+			error = PR_MCE_KILL_DEFAULT;
+		break;
+	case PR_SET_MM:
+		error = prctl_set_mm(arg2, arg3, arg4, arg5);
+		break;
+	case PR_GET_TID_ADDRESS:
+		error = prctl_get_tid_address(me, (int __user **)arg2);
+		break;
+	case PR_SET_CHILD_SUBREAPER:
+		me->signal->is_child_subreaper = !!arg2;
+		break;
+	case PR_GET_CHILD_SUBREAPER:
+		error = put_user(me->signal->is_child_subreaper,
+				 (int __user *)arg2);
+		break;
+	case PR_SET_NO_NEW_PRIVS:
+		if (arg2 != 1 || arg3 || arg4 || arg5)
+			return -EINVAL;
+
+		current->no_new_privs = 1;
+		break;
+	case PR_GET_NO_NEW_PRIVS:
+		if (arg2 || arg3 || arg4 || arg5)
+			return -EINVAL;
+		return current->no_new_privs ? 1 : 0;
+	default:
+		error = -EINVAL;
+		break;
 	}
 	return error;
 }
diff --git a/kernel/time.c b/kernel/time.c
index c2a27dd93142..f8342a41efa6 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -240,7 +240,7 @@ EXPORT_SYMBOL(current_fs_time);
  * Avoid unnecessary multiplications/divisions in the
  * two most common HZ cases:
  */
-inline unsigned int jiffies_to_msecs(const unsigned long j)
+unsigned int jiffies_to_msecs(const unsigned long j)
 {
 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
 	return (MSEC_PER_SEC / HZ) * j;
@@ -256,7 +256,7 @@ inline unsigned int jiffies_to_msecs(const unsigned long j)
 }
 EXPORT_SYMBOL(jiffies_to_msecs);
 
-inline unsigned int jiffies_to_usecs(const unsigned long j)
+unsigned int jiffies_to_usecs(const unsigned long j)
 {
 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
 	return (USEC_PER_SEC / HZ) * j;