Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c            5
-rw-r--r--  kernel/futex.c          46
-rw-r--r--  kernel/signal.c          5
-rw-r--r--  kernel/trace/Kconfig    24
-rw-r--r--  kernel/trace/trace.c    27
-rw-r--r--  kernel/user_namespace.c  4
-rw-r--r--  kernel/workqueue.c       7
7 files changed, 77 insertions, 41 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 8d932b1c9056..1766d324d5e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1141,6 +1141,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
 
+	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
+		return ERR_PTR(-EINVAL);
+
 	/*
 	 * Thread groups must share signals as well, and detached threads
 	 * can only be started up within the thread group.
@@ -1807,7 +1810,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 	 * If unsharing a user namespace must also unshare the thread.
 	 */
 	if (unshare_flags & CLONE_NEWUSER)
-		unshare_flags |= CLONE_THREAD;
+		unshare_flags |= CLONE_THREAD | CLONE_FS;
 	/*
 	 * If unsharing a pid namespace must also unshare the thread.
 	 */
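
For context, a minimal userspace sketch (editorial, not part of the patch) of the behavior the
fork.c hunks enforce: clone(2) now refuses to create a child that both enters a new user
namespace and keeps sharing its parent's fs_struct, while unshare(CLONE_NEWUSER) implicitly
unshares CLONE_FS (and CLONE_THREAD) on the caller's behalf.

#define _GNU_SOURCE
#include <sys/types.h>
#include <sched.h>
#include <signal.h>
#include <errno.h>
#include <stdio.h>

static char child_stack[64 * 1024];

static int child_fn(void *arg) { return 0; }

int main(void)
{
	/* Expected to fail with EINVAL on kernels carrying this change:
	 * a new user namespace may not share the parent's fs_struct. */
	pid_t pid = clone(child_fn, child_stack + sizeof(child_stack),
			  CLONE_NEWUSER | CLONE_FS | SIGCHLD, NULL);
	if (pid == -1 && errno == EINVAL)
		printf("CLONE_NEWUSER|CLONE_FS rejected, as expected\n");

	/* Plain unshare(CLONE_NEWUSER) is still allowed; the kernel now
	 * implies CLONE_FS for it in sys_unshare(). */
	if (unshare(CLONE_NEWUSER) == 0)
		printf("entered a new user namespace\n");
	return 0;
}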
diff --git a/kernel/futex.c b/kernel/futex.c
index f0090a993dab..b26dcfc02c94 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -223,7 +223,8 @@ static void drop_futex_key_refs(union futex_key *key)
  * @rw:		mapping needs to be read/write (values: VERIFY_READ,
  *		VERIFY_WRITE)
  *
- * Returns a negative error code or 0
+ * Return: a negative error code or 0
+ *
  * The key words are stored in *key on success.
  *
  * For shared mappings, it's (page->index, file_inode(vma->vm_file),
@@ -705,9 +706,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
  *			be "current" except in the case of requeue pi.
  * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
  *
- * Returns:
- *  0 - ready to wait
- *  1 - acquired the lock
+ * Return:
+ *  0 - ready to wait;
+ *  1 - acquired the lock;
  * <0 - error
  *
  * The hb->lock and futex_key refs shall be held by the caller.
@@ -1191,9 +1192,9 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
  * hb1 and hb2 must be held by the caller.
  *
- * Returns:
- *  0 - failed to acquire the lock atomicly
- *  1 - acquired the lock
+ * Return:
+ *  0 - failed to acquire the lock atomically;
+ *  1 - acquired the lock;
  * <0 - error
  */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1254,8 +1255,8 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
  * uaddr2 atomically on behalf of the top waiter.
  *
- * Returns:
- * >=0 - on success, the number of tasks requeued or woken
+ * Return:
+ * >=0 - on success, the number of tasks requeued or woken;
  *  <0 - on error
  */
 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
@@ -1536,8 +1537,8 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
  * be paired with exactly one earlier call to queue_me().
  *
- * Returns:
- *   1 - if the futex_q was still queued (and we removed unqueued it)
+ * Return:
+ *   1 - if the futex_q was still queued (and we removed unqueued it);
  *   0 - if the futex_q was already removed by the waking thread
  */
 static int unqueue_me(struct futex_q *q)
@@ -1707,9 +1708,9 @@ static long futex_wait_restart(struct restart_block *restart);
  * the pi_state owner as well as handle race conditions that may allow us to
  * acquire the lock. Must be called with the hb lock held.
  *
- * Returns:
- *  1 - success, lock taken
- *  0 - success, lock not taken
+ * Return:
+ *  1 - success, lock taken;
+ *  0 - success, lock not taken;
  * <0 - on error (-EFAULT)
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
@@ -1824,8 +1825,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  * Return with the hb lock held and a q.key reference on success, and unlocked
  * with no q.key reference on failure.
  *
- * Returns:
- *  0 - uaddr contains val and hb has been locked
+ * Return:
+ *  0 - uaddr contains val and hb has been locked;
  * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
  */
 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
@@ -2203,9 +2204,9 @@ pi_faulted:
  * the wakeup and return the appropriate error code to the caller. Must be
  * called with the hb lock held.
  *
- * Returns
- *  0 - no early wakeup detected
- * <0 - -ETIMEDOUT or -ERESTARTNOINTR
+ * Return:
+ *  0 = no early wakeup detected;
+ * <0 = -ETIMEDOUT or -ERESTARTNOINTR
  */
 static inline
 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
@@ -2247,7 +2248,6 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * @val:	the expected value of uaddr
  * @abs_time:	absolute timeout
  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
- * @clockrt:	whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
  * @uaddr2:	the pi futex we will take prior to returning to user-space
  *
  * The caller will wait on uaddr and will be requeued by futex_requeue() to
@@ -2258,7 +2258,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * there was a need to.
  *
  * We call schedule in futex_wait_queue_me() when we enqueue and return there
- * via the following:
+ * via the following--
  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
  * 2) wakeup on uaddr2 after a requeue
  * 3) signal
@@ -2276,8 +2276,8 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  *
  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
  *
- * Returns:
- *  0 - On success
+ * Return:
+ *  0 - On success;
  * <0 - On error
  */
 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
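
The futex.c hunks above are purely kernel-doc cleanups: the free-form "Returns"/"Returns:"
text becomes the "Return:" section that scripts/kernel-doc recognizes, and the list items gain
separators for readability. A hedged sketch of the resulting comment layout, using a made-up
function name that is not part of the patch:

/**
 * example_trylock() - hypothetical function illustrating the kernel-doc style
 * @uaddr:	user-space address of the futex word
 *
 * Return:
 *  1 - acquired the lock;
 *  0 - did not acquire the lock;
 * <0 - error
 */
static int example_trylock(u32 __user *uaddr)
{
	return 0;
}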
diff --git a/kernel/signal.c b/kernel/signal.c
index 2ec870a4c3c4..dd72567767d9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -485,6 +485,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 		if (force_default || ka->sa.sa_handler != SIG_IGN)
 			ka->sa.sa_handler = SIG_DFL;
 		ka->sa.sa_flags = 0;
+#ifdef __ARCH_HAS_SA_RESTORER
+		ka->sa.sa_restorer = NULL;
+#endif
 		sigemptyset(&ka->sa.sa_mask);
 		ka++;
 	}
@@ -2682,7 +2685,7 @@ static int do_sigpending(void *set, unsigned long sigsetsize)
 /**
  *  sys_rt_sigpending - examine a pending signal that has been raised
  *			while blocked
- *  @set: stores pending signals
+ *  @uset: stores pending signals
  *  @sigsetsize: size of sigset_t type or larger
  */
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 192473b22799..fc382d6e2765 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -414,24 +414,28 @@ config PROBE_EVENTS
 	def_bool n
 
 config DYNAMIC_FTRACE
-	bool "enable/disable ftrace tracepoints dynamically"
+	bool "enable/disable function tracing dynamically"
 	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	default y
 	help
-	  This option will modify all the calls to ftrace dynamically
-	  (will patch them out of the binary image and replace them
-	  with a No-Op instruction) as they are called. A table is
-	  created to dynamically enable them again.
+	  This option will modify all the calls to function tracing
+	  dynamically (will patch them out of the binary image and
+	  replace them with a No-Op instruction) on boot up. During
+	  compile time, a table is made of all the locations that ftrace
+	  can function trace, and this table is linked into the kernel
+	  image. When this is enabled, functions can be individually
+	  enabled, and the functions not enabled will not affect
+	  performance of the system.
+
+	  See the files in /sys/kernel/debug/tracing:
+	    available_filter_functions
+	    set_ftrace_filter
+	    set_ftrace_notrace
 
 	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
 	  otherwise has native performance as long as no tracing is active.
 
-	  The changes to the code are done by a kernel thread that
-	  wakes up once a second and checks to see if any ftrace calls
-	  were made. If so, it runs stop_machine (stops all CPUS)
-	  and modifies the code to jump over the call to ftrace.
-
 config DYNAMIC_FTRACE_WITH_REGS
 	def_bool y
 	depends on DYNAMIC_FTRACE
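
The new help text points readers at the ftrace filter files. As a rough usage sketch (editorial,
not from the patch; paths assume debugfs mounted at /sys/kernel/debug, a CONFIG_DYNAMIC_FTRACE=y
kernel, and root privileges), one might restrict function tracing to a single function like this:

#include <stdio.h>

/* Write a short string into one of the tracing control files. */
static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Trace only schedule(); available_filter_functions lists every
	 * traceable location recorded in the compile-time table. */
	write_str("/sys/kernel/debug/tracing/set_ftrace_filter", "schedule");
	write_str("/sys/kernel/debug/tracing/current_tracer", "function");
	return 0;
}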
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c2e2c2310374..1f835a83cb2c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2400,6 +2400,27 @@ static void test_ftrace_alive(struct seq_file *m)
 	seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
 }
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+	if (iter->trace->allocated_snapshot)
+		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+	else
+		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+	seq_printf(m, "# Snapshot commands:\n");
+	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
+	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
+	seq_printf(m, "#                       is not a '0' or '1')\n");
+}
+#else
+/* Should never be called */
+static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
+#endif
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2411,7 +2432,9 @@ static int s_show(struct seq_file *m, void *v)
 		seq_puts(m, "#\n");
 		test_ftrace_alive(m);
 	}
-	if (iter->trace && iter->trace->print_header)
+	if (iter->snapshot && trace_empty(iter))
+		print_snapshot_help(m, iter);
+	else if (iter->trace && iter->trace->print_header)
 		iter->trace->print_header(m);
 	else
 		trace_default_header(m);
@@ -4144,8 +4167,6 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	default:
 		if (current_trace->allocated_snapshot)
 			tracing_reset_online_cpus(&max_tr);
-		else
-			ret = -EINVAL;
 		break;
 	}
 
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 8b650837083e..b14f4d342043 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/projid.h>
+#include <linux/fs_struct.h>
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
 
@@ -837,6 +838,9 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
 	if (atomic_read(&current->mm->mm_users) > 1)
 		return -EINVAL;
 
+	if (current->fs->users != 1)
+		return -EINVAL;
+
 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
 		return -EPERM;
 
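
A hedged userspace sketch of the new userns_install() restriction: setns(2) into a user
namespace is refused with EINVAL while the caller's fs_struct is still shared
(current->fs->users != 1), e.g. by a sibling task created with CLONE_FS. The /proc path and
pid below are illustrative only, not taken from the patch.

#define _GNU_SOURCE
#include <sched.h>
#include <fcntl.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/proc/1234/ns/user", O_RDONLY);	/* hypothetical target pid */

	if (fd < 0)
		return 1;
	if (setns(fd, CLONE_NEWUSER) == -1 && errno == EINVAL)
		printf("setns refused (shared fs_struct or another -EINVAL case)\n");
	return 0;
}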
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..55fac5b991b7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	int ret;
 
 	mutex_lock(&worker_pool_idr_mutex);
-	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	if (ret >= 0)
+		pool->id = ret;
 	mutex_unlock(&worker_pool_idr_mutex);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /*
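
The workqueue.c hunk is part of the idr API conversion: the two-step
idr_pre_get()/idr_get_new() pattern is replaced by a single idr_alloc() call that returns the
allocated ID or a negative errno. A minimal sketch of the new pattern follows; my_pool, my_idr
and the mutex are illustrative stand-ins, not names from the patch.

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

struct my_pool {
	int id;
};

static DEFINE_IDR(my_idr);
static DEFINE_MUTEX(my_idr_mutex);

static int my_pool_assign_id(struct my_pool *pool)
{
	int ret;

	mutex_lock(&my_idr_mutex);
	/* start = 0, end = 0: allocate any available non-negative ID */
	ret = idr_alloc(&my_idr, pool, 0, 0, GFP_KERNEL);
	if (ret >= 0)
		pool->id = ret;
	mutex_unlock(&my_idr_mutex);

	return ret < 0 ? ret : 0;
}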