Diffstat (limited to 'kernel')
-rw-r--r--   kernel/fork.c            |  5
-rw-r--r--   kernel/futex.c           | 46
-rw-r--r--   kernel/signal.c          |  5
-rw-r--r--   kernel/smpboot.c         |  4
-rw-r--r--   kernel/softirq.c         | 21
-rw-r--r--   kernel/stop_machine.c    |  2
-rw-r--r--   kernel/trace/Kconfig     | 24
-rw-r--r--   kernel/trace/trace.c     | 27
-rw-r--r--   kernel/user_namespace.c  |  4
-rw-r--r--   kernel/workqueue.c       | 51
10 files changed, 115 insertions(+), 74 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 8d932b1c9056..1766d324d5e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1141,6 +1141,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
 
+	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
+		return ERR_PTR(-EINVAL);
+
 	/*
 	 * Thread groups must share signals as well, and detached threads
 	 * can only be started up within the thread group.
@@ -1807,7 +1810,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 	 * If unsharing a user namespace must also unshare the thread.
 	 */
 	if (unshare_flags & CLONE_NEWUSER)
-		unshare_flags |= CLONE_THREAD;
+		unshare_flags |= CLONE_THREAD | CLONE_FS;
 	/*
 	 * If unsharing a pid namespace must also unshare the thread.
 	 */
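
The two fork.c hunks work together: copy_process() now rejects a clone() that asks for a new user namespace (CLONE_NEWUSER) while keeping the fs_struct shared (CLONE_FS), and unshare(CLONE_NEWUSER) is made to imply an unshared fs_struct as well. A minimal userspace sketch (hypothetical illustration, not part of this patch) of the combination that now fails with EINVAL:

/* Hypothetical illustration of the new CLONE_NEWUSER|CLONE_FS restriction. */
#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>

static char child_stack[64 * 1024];

static int child_fn(void *arg)
{
	return 0;
}

int main(void)
{
	/*
	 * Sharing the fs_struct while entering a new user namespace is
	 * rejected by copy_process(), so this clone() should fail with EINVAL.
	 */
	int pid = clone(child_fn, child_stack + sizeof(child_stack),
			CLONE_NEWUSER | CLONE_FS | SIGCHLD, NULL);
	if (pid == -1)
		perror("clone(CLONE_NEWUSER | CLONE_FS)");
	return 0;
}
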
diff --git a/kernel/futex.c b/kernel/futex.c
index f0090a993dab..b26dcfc02c94 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -223,7 +223,8 @@ static void drop_futex_key_refs(union futex_key *key)
  * @rw:		mapping needs to be read/write (values: VERIFY_READ,
  *		VERIFY_WRITE)
  *
- * Returns a negative error code or 0
+ * Return: a negative error code or 0
+ *
  * The key words are stored in *key on success.
  *
  * For shared mappings, it's (page->index, file_inode(vma->vm_file),
@@ -705,9 +706,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
  * be "current" except in the case of requeue pi.
  * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0)
  *
- * Returns:
- *  0 - ready to wait
- *  1 - acquired the lock
+ * Return:
+ *  0 - ready to wait;
+ *  1 - acquired the lock;
  * <0 - error
  *
  * The hb->lock and futex_key refs shall be held by the caller.
@@ -1191,9 +1192,9 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
  * hb1 and hb2 must be held by the caller.
  *
- * Returns:
- *  0 - failed to acquire the lock atomicly
- *  1 - acquired the lock
+ * Return:
+ *  0 - failed to acquire the lock atomically;
+ *  1 - acquired the lock;
  * <0 - error
  */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1254,8 +1255,8 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
  * uaddr2 atomically on behalf of the top waiter.
  *
- * Returns:
- * >=0 - on success, the number of tasks requeued or woken
+ * Return:
+ * >=0 - on success, the number of tasks requeued or woken;
  *  <0 - on error
  */
 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
@@ -1536,8 +1537,8 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
  * be paired with exactly one earlier call to queue_me().
  *
- * Returns:
- *   1 - if the futex_q was still queued (and we removed unqueued it)
+ * Return:
+ *   1 - if the futex_q was still queued (and we removed unqueued it);
  *   0 - if the futex_q was already removed by the waking thread
  */
 static int unqueue_me(struct futex_q *q)
@@ -1707,9 +1708,9 @@ static long futex_wait_restart(struct restart_block *restart);
  * the pi_state owner as well as handle race conditions that may allow us to
  * acquire the lock. Must be called with the hb lock held.
  *
- * Returns:
- *  1 - success, lock taken
- *  0 - success, lock not taken
+ * Return:
+ *  1 - success, lock taken;
+ *  0 - success, lock not taken;
  * <0 - on error (-EFAULT)
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
@@ -1824,8 +1825,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  * Return with the hb lock held and a q.key reference on success, and unlocked
  * with no q.key reference on failure.
  *
- * Returns:
- *  0 - uaddr contains val and hb has been locked
+ * Return:
+ *  0 - uaddr contains val and hb has been locked;
  * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
  */
 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
@@ -2203,9 +2204,9 @@ pi_faulted:
  * the wakeup and return the appropriate error code to the caller. Must be
  * called with the hb lock held.
  *
- * Returns
- *  0 - no early wakeup detected
- * <0 - -ETIMEDOUT or -ERESTARTNOINTR
+ * Return:
+ *  0 = no early wakeup detected;
+ * <0 = -ETIMEDOUT or -ERESTARTNOINTR
  */
 static inline
 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
@@ -2247,7 +2248,6 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * @val:	the expected value of uaddr
  * @abs_time:	absolute timeout
  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
- * @clockrt:	whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
  * @uaddr2:	the pi futex we will take prior to returning to user-space
  *
  * The caller will wait on uaddr and will be requeued by futex_requeue() to
@@ -2258,7 +2258,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * there was a need to.
  *
  * We call schedule in futex_wait_queue_me() when we enqueue and return there
- * via the following:
+ * via the following--
  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
  * 2) wakeup on uaddr2 after a requeue
  * 3) signal
@@ -2276,8 +2276,8 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  *
  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
  *
- * Returns:
- *  0 - On success
+ * Return:
+ *  0 - On success;
  * <0 - On error
  */
 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
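
All of the futex.c hunks are kernel-doc cleanups: the informal "Returns:" blocks become the "Return:" sections that the kernel-doc tooling recognizes, with semicolons separating the enumerated values, and the stale @clockrt parameter description is dropped. A hedged sketch of the resulting convention, using a made-up function rather than one from this file:

/**
 * example_trylock - try to take a lock without sleeping (hypothetical)
 * @lock:	the lock to operate on
 *
 * Return:
 *  1 - acquired the lock;
 *  0 - lock is held by someone else;
 * <0 - error
 */
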
diff --git a/kernel/signal.c b/kernel/signal.c
index 2ec870a4c3c4..dd72567767d9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -485,6 +485,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 		if (force_default || ka->sa.sa_handler != SIG_IGN)
 			ka->sa.sa_handler = SIG_DFL;
 		ka->sa.sa_flags = 0;
+#ifdef __ARCH_HAS_SA_RESTORER
+		ka->sa.sa_restorer = NULL;
+#endif
 		sigemptyset(&ka->sa.sa_mask);
 		ka++;
 	}
@@ -2682,7 +2685,7 @@ static int do_sigpending(void *set, unsigned long sigsetsize)
 /**
  *  sys_rt_sigpending - examine a pending signal that has been raised
  *			while blocked
- *  @set: stores pending signals
+ *  @uset: stores pending signals
  *  @sigsetsize: size of sigset_t type or larger
  */
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index b9bde5727829..8eaed9aa9cf0 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
 			continue;
 		}
 
-		//BUG_ON(td->cpu != smp_processor_id());
+		BUG_ON(td->cpu != smp_processor_id());
 
 		/* Check for state change setup */
 		switch (td->status) {
@@ -209,6 +209,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 {
 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
+	if (ht->pre_unpark)
+		ht->pre_unpark(cpu);
 	kthread_unpark(tsk);
 }
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b4d252fd195b..14d7758074aa 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -323,18 +323,10 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads) {
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+	if (!force_irqthreads)
 		__do_softirq();
-#else
-		do_softirq();
-#endif
-	} else {
-		__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
+	else
 		wakeup_softirqd();
-		__local_bh_enable(SOFTIRQ_OFFSET);
-	}
 }
 
 /*
@@ -342,9 +334,15 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
+#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
+	local_irq_disable();
+#else
+	WARN_ON_ONCE(!irqs_disabled());
+#endif
+
 	account_irq_exit_time(current);
 	trace_hardirq_exit();
-	sub_preempt_count(IRQ_EXIT_OFFSET);
+	sub_preempt_count(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
@@ -354,7 +352,6 @@ void irq_exit(void)
 	tick_nohz_irq_exit();
 #endif
 	rcu_irq_exit();
-	sched_preempt_enable_no_resched();
 }
 
 /*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 95d178c62d5a..c09f2955ae30 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -336,7 +336,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
 	.create			= cpu_stop_create,
 	.setup			= cpu_stop_unpark,
 	.park			= cpu_stop_park,
-	.unpark			= cpu_stop_unpark,
+	.pre_unpark		= cpu_stop_unpark,
 	.selfparking		= true,
 };
 
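
Together with the smpboot.c hunk above, this moves cpu_stop_unpark() from the ->unpark hook to the new ->pre_unpark hook, so the per-cpu stopper is marked enabled before kthread_unpark() wakes the self-parking thread. A hedged sketch of how a hotplug-thread client might fill in the callback set after this change; the client and its helpers below are made up, only the hook names follow this series:

/* Hypothetical smp_hotplug_thread user; only the hook names follow this series. */
static DEFINE_PER_CPU(struct task_struct *, example_task);

static struct smp_hotplug_thread example_threads = {
	.store			= &example_task,
	.thread_should_run	= example_should_run,	/* made-up helpers */
	.thread_fn		= example_fn,
	.thread_comm		= "example/%u",
	.setup			= example_enable,
	.park			= example_disable,
	.pre_unpark		= example_enable,	/* runs before kthread_unpark() */
	.selfparking		= true,
};
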
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 192473b22799..fc382d6e2765 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -414,24 +414,28 @@ config PROBE_EVENTS
 	def_bool n
 
 config DYNAMIC_FTRACE
-	bool "enable/disable ftrace tracepoints dynamically"
+	bool "enable/disable function tracing dynamically"
 	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	default y
 	help
-	  This option will modify all the calls to ftrace dynamically
-	  (will patch them out of the binary image and replace them
-	  with a No-Op instruction) as they are called. A table is
-	  created to dynamically enable them again.
+	  This option will modify all the calls to function tracing
+	  dynamically (will patch them out of the binary image and
+	  replace them with a No-Op instruction) on boot up. During
+	  compile time, a table is made of all the locations that ftrace
+	  can function trace, and this table is linked into the kernel
+	  image. When this is enabled, functions can be individually
+	  enabled, and the functions not enabled will not affect
+	  performance of the system.
+
+	  See the files in /sys/kernel/debug/tracing:
+	    available_filter_functions
+	    set_ftrace_filter
+	    set_ftrace_notrace
 
 	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
 	  otherwise has native performance as long as no tracing is active.
 
-	  The changes to the code are done by a kernel thread that
-	  wakes up once a second and checks to see if any ftrace calls
-	  were made. If so, it runs stop_machine (stops all CPUS)
-	  and modifies the code to jump over the call to ftrace.
-
 config DYNAMIC_FTRACE_WITH_REGS
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c2e2c2310374..1f835a83cb2c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2400,6 +2400,27 @@ static void test_ftrace_alive(struct seq_file *m)
 	seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
 }
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+	if (iter->trace->allocated_snapshot)
+		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+	else
+		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+	seq_printf(m, "# Snapshot commands:\n");
+	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
+	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+	seq_printf(m, "#                     Takes a snapshot of the main buffer.\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "#                     (Doesn't have to be '2' works with any number that\n");
+	seq_printf(m, "#                      is not a '0' or '1')\n");
+}
+#else
+/* Should never be called */
+static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
+#endif
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2411,7 +2432,9 @@ static int s_show(struct seq_file *m, void *v)
 		seq_puts(m, "#\n");
 		test_ftrace_alive(m);
 	}
-	if (iter->trace && iter->trace->print_header)
+	if (iter->snapshot && trace_empty(iter))
+		print_snapshot_help(m, iter);
+	else if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
 	else
 		trace_default_header(m);
@@ -4144,8 +4167,6 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	default:
 		if (current_trace->allocated_snapshot)
 			tracing_reset_online_cpus(&max_tr);
-		else
-			ret = -EINVAL;
 		break;
 	}
 
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 8b650837083e..b14f4d342043 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/projid.h>
+#include <linux/fs_struct.h>
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
 
@@ -837,6 +838,9 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
 	if (atomic_read(&current->mm->mm_users) > 1)
 		return -EINVAL;
 
+	if (current->fs->users != 1)
+		return -EINVAL;
+
 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
 		return -EPERM;
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..b48cd597145d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	int ret;
 
 	mutex_lock(&worker_pool_idr_mutex);
-	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	if (ret >= 0)
+		pool->id = ret;
 	mutex_unlock(&worker_pool_idr_mutex);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /*
@@ -3446,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
 		spin_unlock_irq(&pool->lock);
 		mutex_unlock(&pool->assoc_mutex);
-	}
 
-	/*
-	 * Call schedule() so that we cross rq->lock and thus can guarantee
-	 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
-	 * as scheduler callbacks may be invoked from other cpus.
-	 */
-	schedule();
+		/*
+		 * Call schedule() so that we cross rq->lock and thus can
+		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+		 * This is necessary as scheduler callbacks may be invoked
+		 * from other cpus.
+		 */
+		schedule();
 
-	/*
-	 * Sched callbacks are disabled now. Zap nr_running. After this,
-	 * nr_running stays zero and need_more_worker() and keep_working()
-	 * are always true as long as the worklist is not empty. Pools on
-	 * @cpu now behave as unbound (in terms of concurrency management)
-	 * pools which are served by workers tied to the CPU.
-	 *
-	 * On return from this function, the current worker would trigger
-	 * unbound chain execution of pending work items if other workers
-	 * didn't already.
-	 */
-	for_each_std_worker_pool(pool, cpu)
-		atomic_set(&pool->nr_running, 0);
+		/*
+		 * Sched callbacks are disabled now. Zap nr_running.
+		 * After this, nr_running stays zero and need_more_worker()
+		 * and keep_working() are always true as long as the
+		 * worklist is not empty. This pool now behaves as an
+		 * unbound (in terms of concurrency management) pool which
+		 * are served by workers tied to the pool.
+		 */
+		atomic_set(&pool->nr_running, 0);
+
+		/*
+		 * With concurrency management just turned off, a busy
+		 * worker blocking could lead to lengthy stalls. Kick off
+		 * unbound chain execution of currently pending work items.
+		 */
+		spin_lock_irq(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock_irq(&pool->lock);
+	}
 }
 
 /*
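
The first workqueue.c hunk is part of the idr_pre_get()/idr_get_new() to idr_alloc() conversion: idr_alloc() takes the GFP mask directly and returns the newly allocated ID (or a negative errno), so the caller stores it in pool->id and normalizes the function's return value to 0 or -errno. A generic sketch of the new-style allocation pattern, using an illustrative object and idr rather than the workqueue ones:

/* Illustrative idr_alloc() usage following the pattern adopted above. */
static DEFINE_IDR(example_idr);

struct example_obj {
	int id;
	/* ... */
};

static int example_assign_id(struct example_obj *obj)
{
	int ret;

	/* idr_alloc() returns the new ID (>= 0) or a negative errno. */
	ret = idr_alloc(&example_idr, obj, 0, 0, GFP_KERNEL);
	if (ret < 0)
		return ret;

	obj->id = ret;
	return 0;
}
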