author		Ingo Molnar <mingo@kernel.org>	2013-03-21 06:03:10 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-03-21 06:03:10 -0400
commit		3bf2391729822e591dcfbbd1e9dd2f450968cdcb (patch)
tree		80a0499e57a4cc95b6caea559400b5226ebe606f /kernel
parent		86e213e1d901fbeaf6e57d13c5edd925fadddcbe (diff)
parent		fd4a5aef002bb57e8a35ed34d8a878034b9bde94 (diff)
Merge branch 'perf/urgent' into perf/core
Merge in all pending fixes, before pulling the latest development
bits from Arnaldo - which will involve merge conflicts.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c			 8
-rw-r--r--	kernel/fork.c				 5
-rw-r--r--	kernel/futex.c				46
-rw-r--r--	kernel/signal.c				 5
-rw-r--r--	kernel/smpboot.c			 4
-rw-r--r--	kernel/softirq.c			21
-rw-r--r--	kernel/stop_machine.c			 2
-rw-r--r--	kernel/trace/Kconfig			24
-rw-r--r--	kernel/trace/ftrace.c			 4
-rw-r--r--	kernel/trace/trace.c			86
-rw-r--r--	kernel/trace/trace.h			 6
-rw-r--r--	kernel/trace/trace_irqsoff.c		19
-rw-r--r--	kernel/trace/trace_sched_wakeup.c	18
-rw-r--r--	kernel/user_namespace.c			 4
-rw-r--r--	kernel/workqueue.c			 7
15 files changed, 178 insertions, 81 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index efb75b3a69ad..7b4a55d41efc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4449,12 +4449,15 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 			if (ctxn < 0)
 				goto next;
 			ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
+			if (ctx)
+				perf_event_task_ctx(ctx, task_event);
 		}
-		if (ctx)
-			perf_event_task_ctx(ctx, task_event);
 next:
 		put_cpu_ptr(pmu->pmu_cpu_context);
 	}
+	if (task_event->task_ctx)
+		perf_event_task_ctx(task_event->task_ctx, task_event);
+
 	rcu_read_unlock();
 }
 
@@ -5662,6 +5665,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event)
 		event->attr.sample_period = NSEC_PER_SEC / freq;
 		hwc->sample_period = event->attr.sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
+		hwc->last_period = hwc->sample_period;
 		event->attr.freq = 0;
 	}
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 8d932b1c9056..1766d324d5e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1141,6 +1141,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
 		return ERR_PTR(-EINVAL);
 
+	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
+		return ERR_PTR(-EINVAL);
+
 	/*
 	 * Thread groups must share signals as well, and detached threads
 	 * can only be started up within the thread group.
@@ -1807,7 +1810,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 	 * If unsharing a user namespace must also unshare the thread.
 	 */
 	if (unshare_flags & CLONE_NEWUSER)
-		unshare_flags |= CLONE_THREAD;
+		unshare_flags |= CLONE_THREAD | CLONE_FS;
 	/*
 	 * If unsharing a pid namespace must also unshare the thread.
 	 */
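
The two fork.c hunks work as a pair: clone() now rejects CLONE_NEWUSER together with CLONE_FS outright, and unshare(CLONE_NEWUSER) implies CLONE_FS (on top of CLONE_THREAD), so a new user namespace can never share an fs_struct with another task. A minimal userspace sketch of the visible behavior - the probe program is hypothetical, and the exact errno depends on the caller's state and privileges:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* With this change, unshare(CLONE_NEWUSER) also unshares the
	 * fs_struct; from a multithreaded caller it fails with EINVAL. */
	if (unshare(CLONE_NEWUSER) == -1)
		perror("unshare(CLONE_NEWUSER)");
	else
		puts("new user namespace (fs_struct unshared implicitly)");
	return 0;
}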
diff --git a/kernel/futex.c b/kernel/futex.c
index f0090a993dab..b26dcfc02c94 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -223,7 +223,8 @@ static void drop_futex_key_refs(union futex_key *key)
  * @rw:		mapping needs to be read/write (values: VERIFY_READ,
  *		VERIFY_WRITE)
  *
- * Returns a negative error code or 0
+ * Return: a negative error code or 0
+ *
  * The key words are stored in *key on success.
  *
  * For shared mappings, it's (page->index, file_inode(vma->vm_file),
@@ -705,9 +706,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
  *			be "current" except in the case of requeue pi.
  * @set_waiters:	force setting the FUTEX_WAITERS bit (1) or not (0)
  *
- * Returns:
- *  0 - ready to wait
- *  1 - acquired the lock
+ * Return:
+ *  0 - ready to wait;
+ *  1 - acquired the lock;
  * <0 - error
  *
  * The hb->lock and futex_key refs shall be held by the caller.
@@ -1191,9 +1192,9 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
  * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit.
  * hb1 and hb2 must be held by the caller.
  *
- * Returns:
- *  0 - failed to acquire the lock atomicly
- *  1 - acquired the lock
+ * Return:
+ *  0 - failed to acquire the lock atomically;
+ *  1 - acquired the lock;
  * <0 - error
  */
 static int futex_proxy_trylock_atomic(u32 __user *pifutex,
@@ -1254,8 +1255,8 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
  * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire
  * uaddr2 atomically on behalf of the top waiter.
  *
- * Returns:
- * >=0 - on success, the number of tasks requeued or woken
+ * Return:
+ * >=0 - on success, the number of tasks requeued or woken;
  *  <0 - on error
  */
 static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
@@ -1536,8 +1537,8 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
  * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must
  * be paired with exactly one earlier call to queue_me().
  *
- * Returns:
- *   1 - if the futex_q was still queued (and we removed unqueued it)
+ * Return:
+ *   1 - if the futex_q was still queued (and we removed unqueued it);
  *   0 - if the futex_q was already removed by the waking thread
  */
 static int unqueue_me(struct futex_q *q)
@@ -1707,9 +1708,9 @@ static long futex_wait_restart(struct restart_block *restart);
  * the pi_state owner as well as handle race conditions that may allow us to
  * acquire the lock. Must be called with the hb lock held.
  *
- * Returns:
- *  1 - success, lock taken
- *  0 - success, lock not taken
+ * Return:
+ *  1 - success, lock taken;
+ *  0 - success, lock not taken;
  * <0 - on error (-EFAULT)
  */
 static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
@@ -1824,8 +1825,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
  * Return with the hb lock held and a q.key reference on success, and unlocked
  * with no q.key reference on failure.
  *
- * Returns:
- *  0 - uaddr contains val and hb has been locked
+ * Return:
+ *  0 - uaddr contains val and hb has been locked;
  * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked
  */
 static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags,
@@ -2203,9 +2204,9 @@ pi_faulted:
  * the wakeup and return the appropriate error code to the caller. Must be
  * called with the hb lock held.
  *
- * Returns
- *  0 - no early wakeup detected
- * <0 - -ETIMEDOUT or -ERESTARTNOINTR
+ * Return:
+ *  0 = no early wakeup detected;
+ * <0 = -ETIMEDOUT or -ERESTARTNOINTR
  */
 static inline
 int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
@@ -2247,7 +2248,6 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * @val:	the expected value of uaddr
  * @abs_time:	absolute timeout
  * @bitset:	32 bit wakeup bitset set by userspace, defaults to all
- * @clockrt:	whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0)
  * @uaddr2:	the pi futex we will take prior to returning to user-space
  *
  * The caller will wait on uaddr and will be requeued by futex_requeue() to
@@ -2258,7 +2258,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  * there was a need to.
  *
  * We call schedule in futex_wait_queue_me() when we enqueue and return there
- * via the following:
+ * via the following--
  * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue()
  * 2) wakeup on uaddr2 after a requeue
  * 3) signal
@@ -2276,8 +2276,8 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
  *
  * If 4 or 7, we cleanup and return with -ETIMEDOUT.
  *
- * Returns:
- *  0 - On success
+ * Return:
+ *  0 - On success;
  * <0 - On error
  */
 static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
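
The futex.c hunks above are documentation-only: they convert free-form "Returns" notes into the kernel-doc "Return:" section that the documentation tooling recognizes. A minimal sketch of the convention on a hypothetical function (example_trylock is illustrative, not part of this patch):

/**
 * example_trylock - illustrate the kernel-doc Return: section
 * @uaddr: user-space address of the hypothetical lock word
 *
 * Return:
 *  1 - acquired the lock;
 *  0 - lock was busy;
 * <0 - error
 */
static int example_trylock(u32 __user *uaddr);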
diff --git a/kernel/signal.c b/kernel/signal.c
index 2ec870a4c3c4..dd72567767d9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -485,6 +485,9 @@ flush_signal_handlers(struct task_struct *t, int force_default)
 		if (force_default || ka->sa.sa_handler != SIG_IGN)
 			ka->sa.sa_handler = SIG_DFL;
 		ka->sa.sa_flags = 0;
+#ifdef __ARCH_HAS_SA_RESTORER
+		ka->sa.sa_restorer = NULL;
+#endif
 		sigemptyset(&ka->sa.sa_mask);
 		ka++;
 	}
@@ -2682,7 +2685,7 @@ static int do_sigpending(void *set, unsigned long sigsetsize)
 /**
  * sys_rt_sigpending - examine a pending signal that has been raised
  *			while blocked
- * @set: stores pending signals
+ * @uset: stores pending signals
  * @sigsetsize: size of sigset_t type or larger
  */
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index b9bde5727829..8eaed9aa9cf0 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -131,7 +131,7 @@ static int smpboot_thread_fn(void *data)
 			continue;
 		}
 
-		//BUG_ON(td->cpu != smp_processor_id());
+		BUG_ON(td->cpu != smp_processor_id());
 
 		/* Check for state change setup */
 		switch (td->status) {
@@ -209,6 +209,8 @@ static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 {
 	struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu);
 
+	if (ht->pre_unpark)
+		ht->pre_unpark(cpu);
 	kthread_unpark(tsk);
 }
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b4d252fd195b..14d7758074aa 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -323,18 +323,10 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (!force_irqthreads) {
-#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
+	if (!force_irqthreads)
 		__do_softirq();
-#else
-		do_softirq();
-#endif
-	} else {
-		__local_bh_disable((unsigned long)__builtin_return_address(0),
-				SOFTIRQ_OFFSET);
+	else
 		wakeup_softirqd();
-		__local_bh_enable(SOFTIRQ_OFFSET);
-	}
 }
 
 /*
@@ -342,9 +334,15 @@ static inline void invoke_softirq(void)
  */
 void irq_exit(void)
 {
+#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
+	local_irq_disable();
+#else
+	WARN_ON_ONCE(!irqs_disabled());
+#endif
+
 	account_irq_exit_time(current);
 	trace_hardirq_exit();
-	sub_preempt_count(IRQ_EXIT_OFFSET);
+	sub_preempt_count(HARDIRQ_OFFSET);
 	if (!in_interrupt() && local_softirq_pending())
 		invoke_softirq();
 
@@ -354,7 +352,6 @@ void irq_exit(void)
 	tick_nohz_irq_exit();
 #endif
 	rcu_irq_exit();
-	sched_preempt_enable_no_resched();
 }
 
 /*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 95d178c62d5a..c09f2955ae30 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -336,7 +336,7 @@ static struct smp_hotplug_thread cpu_stop_threads = {
 	.create			= cpu_stop_create,
 	.setup			= cpu_stop_unpark,
 	.park			= cpu_stop_park,
-	.unpark			= cpu_stop_unpark,
+	.pre_unpark		= cpu_stop_unpark,
 	.selfparking		= true,
 };
 
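
This hunk pairs with the smpboot.c change above: cpu_stop_unpark() moves from ->unpark, which kthread_unpark() runs from the freshly woken kthread, to the new ->pre_unpark hook, which smpboot_unpark_thread() invokes before kthread_unpark(), so the stopper is enabled before its thread can run. A hedged sketch of a descriptor using the hook - the my_* names are illustrative, not from this patch:

static void my_pre_unpark(unsigned int cpu)
{
	/* Runs on behalf of the CPU doing the unpark, before the
	 * per-cpu kthread resumes - e.g. set an "enabled" flag. */
}

static struct smp_hotplug_thread my_threads = {
	.store		= &my_task,		/* hypothetical per-cpu task pointer */
	.thread_fn	= my_thread_fn,		/* hypothetical */
	.thread_comm	= "my/%u",
	.pre_unpark	= my_pre_unpark,
	.selfparking	= true,
};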
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 192473b22799..fc382d6e2765 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -414,24 +414,28 @@ config PROBE_EVENTS
 	def_bool n
 
 config DYNAMIC_FTRACE
-	bool "enable/disable ftrace tracepoints dynamically"
+	bool "enable/disable function tracing dynamically"
 	depends on FUNCTION_TRACER
 	depends on HAVE_DYNAMIC_FTRACE
 	default y
 	help
-	  This option will modify all the calls to ftrace dynamically
-	  (will patch them out of the binary image and replace them
-	  with a No-Op instruction) as they are called. A table is
-	  created to dynamically enable them again.
+	  This option will modify all the calls to function tracing
+	  dynamically (will patch them out of the binary image and
+	  replace them with a No-Op instruction) on boot up. During
+	  compile time, a table is made of all the locations that ftrace
+	  can function trace, and this table is linked into the kernel
+	  image. When this is enabled, functions can be individually
+	  enabled, and the functions not enabled will not affect
+	  performance of the system.
+
+	  See the files in /sys/kernel/debug/tracing:
+	    available_filter_functions
+	    set_ftrace_filter
+	    set_ftrace_notrace
 
 	  This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
 	  otherwise has native performance as long as no tracing is active.
 
-	  The changes to the code are done by a kernel thread that
-	  wakes up once a second and checks to see if any ftrace calls
-	  were made. If so, it runs stop_machine (stops all CPUS)
-	  and modifies the code to jump over the call to ftrace.
-
 config DYNAMIC_FTRACE_WITH_REGS
 	def_bool y
 	depends on DYNAMIC_FTRACE
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ab25b88aae56..6893d5a2bf08 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 				continue;
 			}
 
-			hlist_del(&entry->node);
-			call_rcu(&entry->rcu, ftrace_free_entry_rcu);
+			hlist_del_rcu(&entry->node);
+			call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu);
 		}
 	}
 	__disable_ftrace_function_probe();
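
The ftrace.c hunk switches probe teardown to the RCU-aware unlink primitive and the sched flavor of call_rcu, matching how the probe hash is traversed with preemption disabled. A generic sketch of that removal pattern, assuming a hypothetical entry type with an embedded rcu_head:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {			/* hypothetical */
	struct hlist_node	node;
	struct rcu_head		rcu;
};

static void my_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct my_entry, rcu));
}

static void my_remove(struct my_entry *entry)
{
	/* Unlink so new readers no longer see the entry, then free it
	 * only after a sched-RCU grace period has elapsed. */
	hlist_del_rcu(&entry->node);
	call_rcu_sched(&entry->rcu, my_free_rcu);
}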
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c2e2c2310374..4f1dade56981 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 void
 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 {
-	struct ring_buffer *buf = tr->buffer;
+	struct ring_buffer *buf;
 
 	if (trace_stop_count)
 		return;
@@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 
 	arch_spin_lock(&ftrace_max_lock);
 
+	buf = tr->buffer;
 	tr->buffer = max_tr.buffer;
 	max_tr.buffer = buf;
 
@@ -2400,6 +2401,27 @@ static void test_ftrace_alive(struct seq_file *m)
 	seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
 }
 
+#ifdef CONFIG_TRACER_MAX_TRACE
+static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
+{
+	if (iter->trace->allocated_snapshot)
+		seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
+	else
+		seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
+
+	seq_printf(m, "# Snapshot commands:\n");
+	seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
+	seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
+	seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
+	seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
+	seq_printf(m, "#                      (Doesn't have to be '2' works with any number that\n");
+	seq_printf(m, "#                       is not a '0' or '1')\n");
+}
+#else
+/* Should never be called */
+static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
+#endif
+
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
@@ -2411,7 +2433,9 @@ static int s_show(struct seq_file *m, void *v)
 			seq_puts(m, "#\n");
 			test_ftrace_alive(m);
 		}
-		if (iter->trace && iter->trace->print_header)
+		if (iter->snapshot && trace_empty(iter))
+			print_snapshot_help(m, iter);
+		else if (iter->trace && iter->trace->print_header)
 			iter->trace->print_header(m);
 		else
 			trace_default_header(m);
@@ -2857,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
 	return -EINVAL;
 }
 
-static void set_tracer_flags(unsigned int mask, int enabled)
+/* Some tracers require overwrite to stay enabled */
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
+{
+	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
+		return -1;
+
+	return 0;
+}
+
+int set_tracer_flag(unsigned int mask, int enabled)
 {
 	/* do nothing if flag is already set */
 	if (!!(trace_flags & mask) == !!enabled)
-		return;
+		return 0;
+
+	/* Give the tracer a chance to approve the change */
+	if (current_trace->flag_changed)
+		if (current_trace->flag_changed(current_trace, mask, !!enabled))
+			return -EINVAL;
 
 	if (enabled)
 		trace_flags |= mask;
@@ -2871,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled)
 	if (mask == TRACE_ITER_RECORD_CMD)
 		trace_event_enable_cmd_record(enabled);
 
-	if (mask == TRACE_ITER_OVERWRITE)
+	if (mask == TRACE_ITER_OVERWRITE) {
 		ring_buffer_change_overwrite(global_trace.buffer, enabled);
+#ifdef CONFIG_TRACER_MAX_TRACE
+		ring_buffer_change_overwrite(max_tr.buffer, enabled);
+#endif
+	}
 
 	if (mask == TRACE_ITER_PRINTK)
 		trace_printk_start_stop_comm(enabled);
+
+	return 0;
 }
 
 static int trace_set_options(char *option)
 {
 	char *cmp;
 	int neg = 0;
-	int ret = 0;
+	int ret = -ENODEV;
 	int i;
 
 	cmp = strstrip(option);
@@ -2892,19 +2936,20 @@ static int trace_set_options(char *option)
 		cmp += 2;
 	}
 
+	mutex_lock(&trace_types_lock);
+
 	for (i = 0; trace_options[i]; i++) {
 		if (strcmp(cmp, trace_options[i]) == 0) {
-			set_tracer_flags(1 << i, !neg);
+			ret = set_tracer_flag(1 << i, !neg);
 			break;
 		}
 	}
 
 	/* If no option could be set, test the specific tracer options */
-	if (!trace_options[i]) {
-		mutex_lock(&trace_types_lock);
+	if (!trace_options[i])
 		ret = set_tracer_option(current_trace, cmp, neg);
-		mutex_unlock(&trace_types_lock);
-	}
+
+	mutex_unlock(&trace_types_lock);
 
 	return ret;
 }
@@ -2914,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 			size_t cnt, loff_t *ppos)
 {
 	char buf[64];
+	int ret;
 
 	if (cnt >= sizeof(buf))
 		return -EINVAL;
@@ -2923,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 	buf[cnt] = 0;
 
-	trace_set_options(buf);
+	ret = trace_set_options(buf);
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
@@ -3227,6 +3275,9 @@ static int tracing_set_tracer(const char *buf)
 		goto out;
 
 	trace_branch_disable();
+
+	current_trace->enabled = false;
+
 	if (current_trace->reset)
 		current_trace->reset(tr);
 
@@ -3271,6 +3322,7 @@ static int tracing_set_tracer(const char *buf)
 	}
 
 	current_trace = t;
+	current_trace->enabled = true;
 	trace_branch_enable(tr);
  out:
 	mutex_unlock(&trace_types_lock);
@@ -4144,8 +4196,6 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	default:
 		if (current_trace->allocated_snapshot)
 			tracing_reset_online_cpus(&max_tr);
-		else
-			ret = -EINVAL;
 		break;
 	}
 
@@ -4759,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 
 	if (val != 0 && val != 1)
 		return -EINVAL;
-	set_tracer_flags(1 << index, val);
+
+	mutex_lock(&trace_types_lock);
+	ret = set_tracer_flag(1 << index, val);
+	mutex_unlock(&trace_types_lock);
+
+	if (ret < 0)
+		return ret;
 
 	*ppos += cnt;
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 57d7e5397d56..2081971367ea 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -283,11 +283,15 @@ struct tracer {
 	enum print_line_t (*print_line)(struct trace_iterator *iter);
 	/* If you handled the flag setting, return 0 */
 	int			(*set_flag)(u32 old_flags, u32 bit, int set);
+	/* Return 0 if OK with change, else return non-zero */
+	int			(*flag_changed)(struct tracer *tracer,
+						u32 mask, int set);
 	struct tracer		*next;
 	struct tracer_flags	*flags;
 	bool			print_max;
 	bool			use_max_tr;
 	bool			allocated_snapshot;
+	bool			enabled;
 };
 
 
@@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[];
 
 void trace_printk_init_buffers(void);
 void trace_printk_start_comm(void);
+int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
+int set_tracer_flag(unsigned int mask, int enabled);
 
 #undef FTRACE_ENTRY
 #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)	\
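
The new ->flag_changed hook gives the current tracer a veto over trace-flag flips while it is active; trace_keep_overwrite(), added in trace.c above, is the stock implementation the latency tracers reuse. A minimal sketch of a custom veto callback - my_flag_changed is purely illustrative:

/* Refuse to let TRACE_ITER_OVERWRITE be cleared while this tracer
 * is current; allow every other flag change. */
static int my_flag_changed(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;	/* set_tracer_flag() turns this into -EINVAL */

	return 0;
}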
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 713a2cac4881..443b25b43b4f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -32,7 +32,7 @@ enum {
 
 static int trace_type __read_mostly;
 
-static int save_lat_flag;
+static int save_flags;
 
 static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
 static int start_irqsoff_tracer(struct trace_array *tr, int graph);
@@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	irqsoff_trace = tr;
@@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_irqsoff_tracer(tr, is_graph());
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void irqsoff_tracer_start(struct trace_array *tr)
@@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.print_line     = irqsoff_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= irqsoff_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_irqsoff,
 #endif
@@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.print_line     = irqsoff_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= irqsoff_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_preemptoff,
 #endif
@@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.print_line     = irqsoff_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= irqsoff_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_preemptirqsoff,
 #endif
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a1..fde652c9a511 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr);
 static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
 static void wakeup_graph_return(struct ftrace_graph_ret *trace);
 
-static int save_lat_flag;
+static int save_flags;
 
 #define TRACE_DISPLAY_GRAPH     1
 
@@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr)
 
 static int __wakeup_tracer_init(struct trace_array *tr)
 {
-	save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT;
-	trace_flags |= TRACE_ITER_LATENCY_FMT;
+	save_flags = trace_flags;
+
+	/* non overwrite screws up the latency tracers */
+	set_tracer_flag(TRACE_ITER_OVERWRITE, 1);
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1);
 
 	tracing_max_latency = 0;
 	wakeup_trace = tr;
@@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
 
 static void wakeup_tracer_reset(struct trace_array *tr)
 {
+	int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT;
+	int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE;
+
 	stop_wakeup_tracer(tr);
 	/* make sure we put back any tasks we are tracing */
 	wakeup_reset(tr);
 
-	if (!save_lat_flag)
-		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
+	set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag);
+	set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag);
 }
 
 static void wakeup_tracer_start(struct trace_array *tr)
@@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
@@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
 	.print_line	= wakeup_print_line,
 	.flags		= &tracer_flags,
 	.set_flag	= wakeup_set_flag,
+	.flag_changed	= trace_keep_overwrite,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_wakeup,
 #endif
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 8b650837083e..b14f4d342043 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -21,6 +21,7 @@
 #include <linux/uaccess.h>
 #include <linux/ctype.h>
 #include <linux/projid.h>
+#include <linux/fs_struct.h>
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
 
@@ -837,6 +838,9 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
 	if (atomic_read(&current->mm->mm_users) > 1)
 		return -EINVAL;
 
+	if (current->fs->users != 1)
+		return -EINVAL;
+
 	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
 		return -EPERM;
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..55fac5b991b7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	int ret;
 
 	mutex_lock(&worker_pool_idr_mutex);
-	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	if (ret >= 0)
+		pool->id = ret;
 	mutex_unlock(&worker_pool_idr_mutex);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /*
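
This hunk is part of the tree-wide conversion from the two-step idr_pre_get()/idr_get_new() protocol to idr_alloc(), which allocates the id and installs the pointer in a single call, returning the new id directly. A hedged sketch of the new-style allocation - assign_id is an illustrative wrapper, not kernel API:

#include <linux/gfp.h>
#include <linux/idr.h>

/* Allocate any non-negative id for @ptr; idr_alloc() returns the new
 * id on success or a negative errno on failure. */
static int assign_id(struct idr *idr, void *ptr, int *id_out)
{
	int id = idr_alloc(idr, ptr, 0, 0, GFP_KERNEL);	/* end=0: no upper bound */

	if (id < 0)
		return id;	/* -ENOMEM, or -ENOSPC if the id space is exhausted */

	*id_out = id;
	return 0;
}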