author     Ingo Molnar <mingo@elte.hu>    2009-08-11 08:19:09 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-08-11 08:19:09 -0400
commit     89034bc2c7b839702c00a704e79d112737f98be0 (patch)
tree       e65b1f3d4c751baa840efc81bc4734f089379eb3 /kernel
parent     fb82ad719831db58e9baa4c67015aae3fe27e7e3 (diff)
parent     85dfd81dc57e8183a277ddd7a56aa65c96f3f487 (diff)
Merge branch 'linus' into tracing/core
Conflicts:
        kernel/trace/trace_events_filter.c

We use the tracing/core version.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                      | 151
-rw-r--r--  kernel/fork.c                        |  30
-rw-r--r--  kernel/freezer.c                     |   7
-rw-r--r--  kernel/irq/internals.h               |   3
-rw-r--r--  kernel/irq/manage.c                  |  55
-rw-r--r--  kernel/irq/migration.c               |   2
-rw-r--r--  kernel/irq/numa_migrate.c            |   4
-rw-r--r--  kernel/kexec.c                       |   2
-rw-r--r--  kernel/kprobes.c                     |   2
-rw-r--r--  kernel/kthread.c                     |  10
-rw-r--r--  kernel/lockdep_proc.c                |   3
-rw-r--r--  kernel/module.c                      |   3
-rw-r--r--  kernel/panic.c                       |   1
-rw-r--r--  kernel/perf_counter.c                | 516
-rw-r--r--  kernel/posix-cpu-timers.c            |   7
-rw-r--r--  kernel/posix-timers.c                |   7
-rw-r--r--  kernel/profile.c                     |   5
-rw-r--r--  kernel/rtmutex.c                     |   4
-rw-r--r--  kernel/sched.c                       |   4
-rw-r--r--  kernel/sched_cpupri.c                |  15
-rw-r--r--  kernel/sched_fair.c                  |  42
-rw-r--r--  kernel/signal.c                      |  25
-rw-r--r--  kernel/smp.c                         |   2
-rw-r--r--  kernel/softirq.c                     |  64
-rw-r--r--  kernel/time/clocksource.c            |   2
-rw-r--r--  kernel/timer.c                       |   2
-rw-r--r--  kernel/trace/ftrace.c                |  19
-rw-r--r--  kernel/trace/ring_buffer.c           |  15
-rw-r--r--  kernel/trace/trace.c                 |  13
-rw-r--r--  kernel/trace/trace.h                 |   4
-rw-r--r--  kernel/trace/trace_event_profile.c   |   2
-rw-r--r--  kernel/trace/trace_events.c          |   4
-rw-r--r--  kernel/trace/trace_functions_graph.c |  11
-rw-r--r--  kernel/trace/trace_printk.c          |   2
-rw-r--r--  kernel/trace/trace_stack.c           |   7
-rw-r--r--  kernel/trace/trace_stat.c            |  34
36 files changed, 712 insertions(+), 367 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 3737a682cdf5..b6eadfe30e7b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -47,6 +47,7 @@
47#include <linux/hash.h> 47#include <linux/hash.h>
48#include <linux/namei.h> 48#include <linux/namei.h>
49#include <linux/smp_lock.h> 49#include <linux/smp_lock.h>
50#include <linux/pid_namespace.h>
50 51
51#include <asm/atomic.h> 52#include <asm/atomic.h>
52 53
@@ -734,16 +735,28 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
734 * reference to css->refcnt. In general, this refcnt is expected to goes down 735 * reference to css->refcnt. In general, this refcnt is expected to goes down
735 * to zero, soon. 736 * to zero, soon.
736 * 737 *
737 * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex; 738 * CGRP_WAIT_ON_RMDIR flag is set under cgroup's inode->i_mutex;
738 */ 739 */
739DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); 740DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
740 741
741static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp) 742static void cgroup_wakeup_rmdir_waiter(struct cgroup *cgrp)
742{ 743{
743 if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) 744 if (unlikely(test_and_clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
744 wake_up_all(&cgroup_rmdir_waitq); 745 wake_up_all(&cgroup_rmdir_waitq);
745} 746}
746 747
748void cgroup_exclude_rmdir(struct cgroup_subsys_state *css)
749{
750 css_get(css);
751}
752
753void cgroup_release_and_wakeup_rmdir(struct cgroup_subsys_state *css)
754{
755 cgroup_wakeup_rmdir_waiter(css->cgroup);
756 css_put(css);
757}
758
759
747static int rebind_subsystems(struct cgroupfs_root *root, 760static int rebind_subsystems(struct cgroupfs_root *root,
748 unsigned long final_bits) 761 unsigned long final_bits)
749{ 762{
@@ -960,6 +973,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
960 INIT_LIST_HEAD(&cgrp->children); 973 INIT_LIST_HEAD(&cgrp->children);
961 INIT_LIST_HEAD(&cgrp->css_sets); 974 INIT_LIST_HEAD(&cgrp->css_sets);
962 INIT_LIST_HEAD(&cgrp->release_list); 975 INIT_LIST_HEAD(&cgrp->release_list);
976 INIT_LIST_HEAD(&cgrp->pids_list);
963 init_rwsem(&cgrp->pids_mutex); 977 init_rwsem(&cgrp->pids_mutex);
964} 978}
965static void init_cgroup_root(struct cgroupfs_root *root) 979static void init_cgroup_root(struct cgroupfs_root *root)
@@ -1357,7 +1371,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1357 * wake up rmdir() waiter. the rmdir should fail since the cgroup 1371 * wake up rmdir() waiter. the rmdir should fail since the cgroup
1358 * is no longer empty. 1372 * is no longer empty.
1359 */ 1373 */
1360 cgroup_wakeup_rmdir_waiters(cgrp); 1374 cgroup_wakeup_rmdir_waiter(cgrp);
1361 return 0; 1375 return 0;
1362} 1376}
1363 1377
@@ -2201,12 +2215,30 @@ err:
2201 return ret; 2215 return ret;
2202} 2216}
2203 2217
2218/*
2219 * Cache pids for all threads in the same pid namespace that are
2220 * opening the same "tasks" file.
2221 */
2222struct cgroup_pids {
2223 /* The node in cgrp->pids_list */
2224 struct list_head list;
2225 /* The cgroup those pids belong to */
2226 struct cgroup *cgrp;
2227 /* The namepsace those pids belong to */
2228 struct pid_namespace *ns;
2229 /* Array of process ids in the cgroup */
2230 pid_t *tasks_pids;
2231 /* How many files are using the this tasks_pids array */
2232 int use_count;
2233 /* Length of the current tasks_pids array */
2234 int length;
2235};
2236
2204static int cmppid(const void *a, const void *b) 2237static int cmppid(const void *a, const void *b)
2205{ 2238{
2206 return *(pid_t *)a - *(pid_t *)b; 2239 return *(pid_t *)a - *(pid_t *)b;
2207} 2240}
2208 2241
2209
2210/* 2242/*
2211 * seq_file methods for the "tasks" file. The seq_file position is the 2243 * seq_file methods for the "tasks" file. The seq_file position is the
2212 * next pid to display; the seq_file iterator is a pointer to the pid 2244 * next pid to display; the seq_file iterator is a pointer to the pid
@@ -2221,45 +2253,47 @@ static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
2221 * after a seek to the start). Use a binary-search to find the 2253 * after a seek to the start). Use a binary-search to find the
2222 * next pid to display, if any 2254 * next pid to display, if any
2223 */ 2255 */
2224 struct cgroup *cgrp = s->private; 2256 struct cgroup_pids *cp = s->private;
2257 struct cgroup *cgrp = cp->cgrp;
2225 int index = 0, pid = *pos; 2258 int index = 0, pid = *pos;
2226 int *iter; 2259 int *iter;
2227 2260
2228 down_read(&cgrp->pids_mutex); 2261 down_read(&cgrp->pids_mutex);
2229 if (pid) { 2262 if (pid) {
2230 int end = cgrp->pids_length; 2263 int end = cp->length;
2231 2264
2232 while (index < end) { 2265 while (index < end) {
2233 int mid = (index + end) / 2; 2266 int mid = (index + end) / 2;
2234 if (cgrp->tasks_pids[mid] == pid) { 2267 if (cp->tasks_pids[mid] == pid) {
2235 index = mid; 2268 index = mid;
2236 break; 2269 break;
2237 } else if (cgrp->tasks_pids[mid] <= pid) 2270 } else if (cp->tasks_pids[mid] <= pid)
2238 index = mid + 1; 2271 index = mid + 1;
2239 else 2272 else
2240 end = mid; 2273 end = mid;
2241 } 2274 }
2242 } 2275 }
2243 /* If we're off the end of the array, we're done */ 2276 /* If we're off the end of the array, we're done */
2244 if (index >= cgrp->pids_length) 2277 if (index >= cp->length)
2245 return NULL; 2278 return NULL;
2246 /* Update the abstract position to be the actual pid that we found */ 2279 /* Update the abstract position to be the actual pid that we found */
2247 iter = cgrp->tasks_pids + index; 2280 iter = cp->tasks_pids + index;
2248 *pos = *iter; 2281 *pos = *iter;
2249 return iter; 2282 return iter;
2250} 2283}
2251 2284
2252static void cgroup_tasks_stop(struct seq_file *s, void *v) 2285static void cgroup_tasks_stop(struct seq_file *s, void *v)
2253{ 2286{
2254 struct cgroup *cgrp = s->private; 2287 struct cgroup_pids *cp = s->private;
2288 struct cgroup *cgrp = cp->cgrp;
2255 up_read(&cgrp->pids_mutex); 2289 up_read(&cgrp->pids_mutex);
2256} 2290}
2257 2291
2258static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos) 2292static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
2259{ 2293{
2260 struct cgroup *cgrp = s->private; 2294 struct cgroup_pids *cp = s->private;
2261 int *p = v; 2295 int *p = v;
2262 int *end = cgrp->tasks_pids + cgrp->pids_length; 2296 int *end = cp->tasks_pids + cp->length;
2263 2297
2264 /* 2298 /*
2265 * Advance to the next pid in the array. If this goes off the 2299 * Advance to the next pid in the array. If this goes off the
@@ -2286,26 +2320,33 @@ static struct seq_operations cgroup_tasks_seq_operations = {
2286 .show = cgroup_tasks_show, 2320 .show = cgroup_tasks_show,
2287}; 2321};
2288 2322
2289static void release_cgroup_pid_array(struct cgroup *cgrp) 2323static void release_cgroup_pid_array(struct cgroup_pids *cp)
2290{ 2324{
2325 struct cgroup *cgrp = cp->cgrp;
2326
2291 down_write(&cgrp->pids_mutex); 2327 down_write(&cgrp->pids_mutex);
2292 BUG_ON(!cgrp->pids_use_count); 2328 BUG_ON(!cp->use_count);
2293 if (!--cgrp->pids_use_count) { 2329 if (!--cp->use_count) {
2294 kfree(cgrp->tasks_pids); 2330 list_del(&cp->list);
2295 cgrp->tasks_pids = NULL; 2331 put_pid_ns(cp->ns);
2296 cgrp->pids_length = 0; 2332 kfree(cp->tasks_pids);
2333 kfree(cp);
2297 } 2334 }
2298 up_write(&cgrp->pids_mutex); 2335 up_write(&cgrp->pids_mutex);
2299} 2336}
2300 2337
2301static int cgroup_tasks_release(struct inode *inode, struct file *file) 2338static int cgroup_tasks_release(struct inode *inode, struct file *file)
2302{ 2339{
2303 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); 2340 struct seq_file *seq;
2341 struct cgroup_pids *cp;
2304 2342
2305 if (!(file->f_mode & FMODE_READ)) 2343 if (!(file->f_mode & FMODE_READ))
2306 return 0; 2344 return 0;
2307 2345
2308 release_cgroup_pid_array(cgrp); 2346 seq = file->private_data;
2347 cp = seq->private;
2348
2349 release_cgroup_pid_array(cp);
2309 return seq_release(inode, file); 2350 return seq_release(inode, file);
2310} 2351}
2311 2352
@@ -2324,6 +2365,8 @@ static struct file_operations cgroup_tasks_operations = {
2324static int cgroup_tasks_open(struct inode *unused, struct file *file) 2365static int cgroup_tasks_open(struct inode *unused, struct file *file)
2325{ 2366{
2326 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); 2367 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2368 struct pid_namespace *ns = current->nsproxy->pid_ns;
2369 struct cgroup_pids *cp;
2327 pid_t *pidarray; 2370 pid_t *pidarray;
2328 int npids; 2371 int npids;
2329 int retval; 2372 int retval;
@@ -2350,20 +2393,37 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file)
2350 * array if necessary 2393 * array if necessary
2351 */ 2394 */
2352 down_write(&cgrp->pids_mutex); 2395 down_write(&cgrp->pids_mutex);
2353 kfree(cgrp->tasks_pids); 2396
2354 cgrp->tasks_pids = pidarray; 2397 list_for_each_entry(cp, &cgrp->pids_list, list) {
2355 cgrp->pids_length = npids; 2398 if (ns == cp->ns)
2356 cgrp->pids_use_count++; 2399 goto found;
2400 }
2401
2402 cp = kzalloc(sizeof(*cp), GFP_KERNEL);
2403 if (!cp) {
2404 up_write(&cgrp->pids_mutex);
2405 kfree(pidarray);
2406 return -ENOMEM;
2407 }
2408 cp->cgrp = cgrp;
2409 cp->ns = ns;
2410 get_pid_ns(ns);
2411 list_add(&cp->list, &cgrp->pids_list);
2412found:
2413 kfree(cp->tasks_pids);
2414 cp->tasks_pids = pidarray;
2415 cp->length = npids;
2416 cp->use_count++;
2357 up_write(&cgrp->pids_mutex); 2417 up_write(&cgrp->pids_mutex);
2358 2418
2359 file->f_op = &cgroup_tasks_operations; 2419 file->f_op = &cgroup_tasks_operations;
2360 2420
2361 retval = seq_open(file, &cgroup_tasks_seq_operations); 2421 retval = seq_open(file, &cgroup_tasks_seq_operations);
2362 if (retval) { 2422 if (retval) {
2363 release_cgroup_pid_array(cgrp); 2423 release_cgroup_pid_array(cp);
2364 return retval; 2424 return retval;
2365 } 2425 }
2366 ((struct seq_file *)file->private_data)->private = cgrp; 2426 ((struct seq_file *)file->private_data)->private = cp;
2367 return 0; 2427 return 0;
2368} 2428}
2369 2429
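
The cgroup_tasks_open() hunk above replaces the single per-cgroup pid array with a find-or-create cache keyed by the opener's pid namespace. A standalone sketch of that pattern in plain C, with an integer key standing in for struct pid_namespace and no locking (the kernel does all of this under cgrp->pids_mutex); the names below are illustrative, not kernel symbols:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>

    /* One cached pid snapshot per key (the kernel keys on the pid namespace). */
    struct pid_cache {
            struct pid_cache *next;
            int key;                /* stands in for struct pid_namespace * */
            pid_t *pids;
            int length;
            int use_count;
    };

    static struct pid_cache *cache_head;

    /* Find-or-create, as in cgroup_tasks_open(): look up the key, allocate a
     * node on a miss, then install the freshly built array either way. */
    static struct pid_cache *cache_install(int key, pid_t *pids, int length)
    {
            struct pid_cache *cp;

            for (cp = cache_head; cp; cp = cp->next)
                    if (cp->key == key)
                            goto found;

            cp = calloc(1, sizeof(*cp));
            if (!cp) {
                    free(pids);
                    return NULL;
            }
            cp->key = key;
            cp->next = cache_head;
            cache_head = cp;
    found:
            free(cp->pids);         /* drop the stale snapshot, if any */
            cp->pids = pids;
            cp->length = length;
            cp->use_count++;
            return cp;
    }

    int main(void)
    {
            pid_t *a = malloc(2 * sizeof(pid_t));
            pid_t *b = malloc(3 * sizeof(pid_t));

            cache_install(1, a, 2);   /* miss: new node for key 1 */
            cache_install(1, b, 3);   /* hit: replaces the array, bumps use_count */
            printf("use_count=%d length=%d\n",
                   cache_head->use_count, cache_head->length);  /* 2, 3 */
            return 0;
    }

The point of the goto-found shape is that the hit and the miss path both end up installing the freshly built array and taking one reference on the cache entry.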
@@ -2696,33 +2756,42 @@ again:
2696 mutex_unlock(&cgroup_mutex); 2756 mutex_unlock(&cgroup_mutex);
2697 2757
2698 /* 2758 /*
2759 * In general, subsystem has no css->refcnt after pre_destroy(). But
2760 * in racy cases, subsystem may have to get css->refcnt after
2761 * pre_destroy() and it makes rmdir return with -EBUSY. This sometimes
2762 * make rmdir return -EBUSY too often. To avoid that, we use waitqueue
2763 * for cgroup's rmdir. CGRP_WAIT_ON_RMDIR is for synchronizing rmdir
2764 * and subsystem's reference count handling. Please see css_get/put
2765 * and css_tryget() and cgroup_wakeup_rmdir_waiter() implementation.
2766 */
2767 set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
2768
2769 /*
2699 * Call pre_destroy handlers of subsys. Notify subsystems 2770 * Call pre_destroy handlers of subsys. Notify subsystems
2700 * that rmdir() request comes. 2771 * that rmdir() request comes.
2701 */ 2772 */
2702 ret = cgroup_call_pre_destroy(cgrp); 2773 ret = cgroup_call_pre_destroy(cgrp);
2703 if (ret) 2774 if (ret) {
2775 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
2704 return ret; 2776 return ret;
2777 }
2705 2778
2706 mutex_lock(&cgroup_mutex); 2779 mutex_lock(&cgroup_mutex);
2707 parent = cgrp->parent; 2780 parent = cgrp->parent;
2708 if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { 2781 if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
2782 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
2709 mutex_unlock(&cgroup_mutex); 2783 mutex_unlock(&cgroup_mutex);
2710 return -EBUSY; 2784 return -EBUSY;
2711 } 2785 }
2712 /*
2713 * css_put/get is provided for subsys to grab refcnt to css. In typical
2714 * case, subsystem has no reference after pre_destroy(). But, under
2715 * hierarchy management, some *temporal* refcnt can be hold.
2716 * To avoid returning -EBUSY to a user, waitqueue is used. If subsys
2717 * is really busy, it should return -EBUSY at pre_destroy(). wake_up
2718 * is called when css_put() is called and refcnt goes down to 0.
2719 */
2720 set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
2721 prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE); 2786 prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
2722
2723 if (!cgroup_clear_css_refs(cgrp)) { 2787 if (!cgroup_clear_css_refs(cgrp)) {
2724 mutex_unlock(&cgroup_mutex); 2788 mutex_unlock(&cgroup_mutex);
2725 schedule(); 2789 /*
2790 * Because someone may call cgroup_wakeup_rmdir_waiter() before
2791 * prepare_to_wait(), we need to check this flag.
2792 */
2793 if (test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))
2794 schedule();
2726 finish_wait(&cgroup_rmdir_waitq, &wait); 2795 finish_wait(&cgroup_rmdir_waitq, &wait);
2727 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); 2796 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
2728 if (signal_pending(current)) 2797 if (signal_pending(current))
@@ -3294,7 +3363,7 @@ void __css_put(struct cgroup_subsys_state *css)
3294 set_bit(CGRP_RELEASABLE, &cgrp->flags); 3363 set_bit(CGRP_RELEASABLE, &cgrp->flags);
3295 check_for_release(cgrp); 3364 check_for_release(cgrp);
3296 } 3365 }
3297 cgroup_wakeup_rmdir_waiters(cgrp); 3366 cgroup_wakeup_rmdir_waiter(cgrp);
3298 } 3367 }
3299 rcu_read_unlock(); 3368 rcu_read_unlock();
3300} 3369}
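
The cgroup.c changes above also keep each cached pid array sorted so the "tasks" seq_file can resume at an arbitrary position. Below is a userspace sketch of the binary search cgroup_tasks_start() uses for that; the array and helper names are illustrative, only the search shape mirrors the kernel code:

    #include <stdio.h>
    #include <sys/types.h>

    /*
     * Find where to resume in a sorted pid array: the index of pos itself if
     * it is still present, otherwise the first pid greater than pos. Returns
     * length when pos is past the end (nothing left to show).
     */
    static int pid_array_resume(const pid_t *pids, int length, pid_t pos)
    {
            int index = 0, end = length;

            while (index < end) {
                    int mid = (index + end) / 2;

                    if (pids[mid] == pos) {
                            index = mid;
                            break;
                    } else if (pids[mid] <= pos)
                            index = mid + 1;
                    else
                            end = mid;
            }
            return index;
    }

    int main(void)
    {
            pid_t pids[] = { 100, 204, 310, 977 };
            int idx = pid_array_resume(pids, 4, 205);

            /* Resuming at pos 205 lands on 310, the next pid to display. */
            printf("resume at index %d (pid %d)\n", idx, (int)pids[idx]);
            return 0;
    }

Because *pos is rewritten to the pid actually found after each read, a pid that exits between reads simply makes the next search land on the following larger one.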
diff --git a/kernel/fork.c b/kernel/fork.c
index bd2959228871..021e1138556e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -426,6 +426,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
426 init_rwsem(&mm->mmap_sem); 426 init_rwsem(&mm->mmap_sem);
427 INIT_LIST_HEAD(&mm->mmlist); 427 INIT_LIST_HEAD(&mm->mmlist);
428 mm->flags = (current->mm) ? current->mm->flags : default_dump_filter; 428 mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
429 mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0;
429 mm->core_state = NULL; 430 mm->core_state = NULL;
430 mm->nr_ptes = 0; 431 mm->nr_ptes = 0;
431 set_mm_counter(mm, file_rss, 0); 432 set_mm_counter(mm, file_rss, 0);
@@ -567,18 +568,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
567 * the value intact in a core dump, and to save the unnecessary 568 * the value intact in a core dump, and to save the unnecessary
568 * trouble otherwise. Userland only wants this done for a sys_exit. 569 * trouble otherwise. Userland only wants this done for a sys_exit.
569 */ 570 */
570 if (tsk->clear_child_tid 571 if (tsk->clear_child_tid) {
571 && !(tsk->flags & PF_SIGNALED) 572 if (!(tsk->flags & PF_SIGNALED) &&
572 && atomic_read(&mm->mm_users) > 1) { 573 atomic_read(&mm->mm_users) > 1) {
573 u32 __user * tidptr = tsk->clear_child_tid; 574 /*
575 * We don't check the error code - if userspace has
576 * not set up a proper pointer then tough luck.
577 */
578 put_user(0, tsk->clear_child_tid);
579 sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
580 1, NULL, NULL, 0);
581 }
574 tsk->clear_child_tid = NULL; 582 tsk->clear_child_tid = NULL;
575
576 /*
577 * We don't check the error code - if userspace has
578 * not set up a proper pointer then tough luck.
579 */
580 put_user(0, tidptr);
581 sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
582 } 583 }
583} 584}
584 585
@@ -1268,6 +1269,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1268 write_unlock_irq(&tasklist_lock); 1269 write_unlock_irq(&tasklist_lock);
1269 proc_fork_connector(p); 1270 proc_fork_connector(p);
1270 cgroup_post_fork(p); 1271 cgroup_post_fork(p);
1272 perf_counter_fork(p);
1271 return p; 1273 return p;
1272 1274
1273bad_fork_free_pid: 1275bad_fork_free_pid:
@@ -1407,12 +1409,6 @@ long do_fork(unsigned long clone_flags,
1407 if (clone_flags & CLONE_VFORK) { 1409 if (clone_flags & CLONE_VFORK) {
1408 p->vfork_done = &vfork; 1410 p->vfork_done = &vfork;
1409 init_completion(&vfork); 1411 init_completion(&vfork);
1410 } else if (!(clone_flags & CLONE_VM)) {
1411 /*
1412 * vfork will do an exec which will call
1413 * set_task_comm()
1414 */
1415 perf_counter_fork(p);
1416 } 1412 }
1417 1413
1418 audit_finish_fork(p); 1414 audit_finish_fork(p);
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 2f4936cf7083..bd1d42b17cb2 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -44,12 +44,19 @@ void refrigerator(void)
44 recalc_sigpending(); /* We sent fake signal, clean it up */ 44 recalc_sigpending(); /* We sent fake signal, clean it up */
45 spin_unlock_irq(&current->sighand->siglock); 45 spin_unlock_irq(&current->sighand->siglock);
46 46
47 /* prevent accounting of that task to load */
48 current->flags |= PF_FREEZING;
49
47 for (;;) { 50 for (;;) {
48 set_current_state(TASK_UNINTERRUPTIBLE); 51 set_current_state(TASK_UNINTERRUPTIBLE);
49 if (!frozen(current)) 52 if (!frozen(current))
50 break; 53 break;
51 schedule(); 54 schedule();
52 } 55 }
56
57 /* Remove the accounting blocker */
58 current->flags &= ~PF_FREEZING;
59
53 pr_debug("%s left refrigerator\n", current->comm); 60 pr_debug("%s left refrigerator\n", current->comm);
54 __set_current_state(save); 61 __set_current_state(save);
55} 62}
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 73468253143b..e70ed5592eb9 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -42,8 +42,7 @@ static inline void unregister_handler_proc(unsigned int irq,
42 42
43extern int irq_select_affinity_usr(unsigned int irq); 43extern int irq_select_affinity_usr(unsigned int irq);
44 44
45extern void 45extern void irq_set_thread_affinity(struct irq_desc *desc);
46irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask);
47 46
48/* 47/*
49 * Debugging printout: 48 * Debugging printout:
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 50da67672901..61c679db4687 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -80,14 +80,22 @@ int irq_can_set_affinity(unsigned int irq)
80 return 1; 80 return 1;
81} 81}
82 82
83void 83/**
84irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask) 84 * irq_set_thread_affinity - Notify irq threads to adjust affinity
85 * @desc: irq descriptor which has affitnity changed
86 *
87 * We just set IRQTF_AFFINITY and delegate the affinity setting
88 * to the interrupt thread itself. We can not call
89 * set_cpus_allowed_ptr() here as we hold desc->lock and this
90 * code can be called from hard interrupt context.
91 */
92void irq_set_thread_affinity(struct irq_desc *desc)
85{ 93{
86 struct irqaction *action = desc->action; 94 struct irqaction *action = desc->action;
87 95
88 while (action) { 96 while (action) {
89 if (action->thread) 97 if (action->thread)
90 set_cpus_allowed_ptr(action->thread, cpumask); 98 set_bit(IRQTF_AFFINITY, &action->thread_flags);
91 action = action->next; 99 action = action->next;
92 } 100 }
93} 101}
@@ -112,7 +120,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
112 if (desc->status & IRQ_MOVE_PCNTXT) { 120 if (desc->status & IRQ_MOVE_PCNTXT) {
113 if (!desc->chip->set_affinity(irq, cpumask)) { 121 if (!desc->chip->set_affinity(irq, cpumask)) {
114 cpumask_copy(desc->affinity, cpumask); 122 cpumask_copy(desc->affinity, cpumask);
115 irq_set_thread_affinity(desc, cpumask); 123 irq_set_thread_affinity(desc);
116 } 124 }
117 } 125 }
118 else { 126 else {
@@ -122,7 +130,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
122#else 130#else
123 if (!desc->chip->set_affinity(irq, cpumask)) { 131 if (!desc->chip->set_affinity(irq, cpumask)) {
124 cpumask_copy(desc->affinity, cpumask); 132 cpumask_copy(desc->affinity, cpumask);
125 irq_set_thread_affinity(desc, cpumask); 133 irq_set_thread_affinity(desc);
126 } 134 }
127#endif 135#endif
128 desc->status |= IRQ_AFFINITY_SET; 136 desc->status |= IRQ_AFFINITY_SET;
@@ -176,7 +184,7 @@ int irq_select_affinity_usr(unsigned int irq)
176 spin_lock_irqsave(&desc->lock, flags); 184 spin_lock_irqsave(&desc->lock, flags);
177 ret = setup_affinity(irq, desc); 185 ret = setup_affinity(irq, desc);
178 if (!ret) 186 if (!ret)
179 irq_set_thread_affinity(desc, desc->affinity); 187 irq_set_thread_affinity(desc);
180 spin_unlock_irqrestore(&desc->lock, flags); 188 spin_unlock_irqrestore(&desc->lock, flags);
181 189
182 return ret; 190 return ret;
@@ -443,6 +451,39 @@ static int irq_wait_for_interrupt(struct irqaction *action)
443 return -1; 451 return -1;
444} 452}
445 453
454#ifdef CONFIG_SMP
455/*
456 * Check whether we need to change the affinity of the interrupt thread.
457 */
458static void
459irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
460{
461 cpumask_var_t mask;
462
463 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
464 return;
465
466 /*
467 * In case we are out of memory we set IRQTF_AFFINITY again and
468 * try again next time
469 */
470 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
471 set_bit(IRQTF_AFFINITY, &action->thread_flags);
472 return;
473 }
474
475 spin_lock_irq(&desc->lock);
476 cpumask_copy(mask, desc->affinity);
477 spin_unlock_irq(&desc->lock);
478
479 set_cpus_allowed_ptr(current, mask);
480 free_cpumask_var(mask);
481}
482#else
483static inline void
484irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
485#endif
486
446/* 487/*
447 * Interrupt handler thread 488 * Interrupt handler thread
448 */ 489 */
@@ -458,6 +499,8 @@ static int irq_thread(void *data)
458 499
459 while (!irq_wait_for_interrupt(action)) { 500 while (!irq_wait_for_interrupt(action)) {
460 501
502 irq_thread_check_affinity(desc, action);
503
461 atomic_inc(&desc->threads_active); 504 atomic_inc(&desc->threads_active);
462 505
463 spin_lock_irq(&desc->lock); 506 spin_lock_irq(&desc->lock);
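
The manage.c change above stops calling set_cpus_allowed_ptr() under desc->lock (hard-irq context, no sleeping) and instead sets IRQTF_AFFINITY so the interrupt thread re-applies its own affinity the next time it wakes. A userspace sketch of that "flag now, apply in the thread" pattern using POSIX threads; the flag, mask handling and loop below are illustrative, not the kernel's IRQTF_* machinery (build with -pthread):

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <sched.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int affinity_dirty;      /* stands in for IRQTF_AFFINITY */
    static cpu_set_t wanted_mask;           /* stands in for desc->affinity */
    static pthread_mutex_t mask_lock = PTHREAD_MUTEX_INITIALIZER;

    /* "Interrupt context" side: may not sleep, so it only records the request. */
    static void request_affinity(int cpu)
    {
            pthread_mutex_lock(&mask_lock);
            CPU_ZERO(&wanted_mask);
            CPU_SET(cpu, &wanted_mask);
            pthread_mutex_unlock(&mask_lock);
            atomic_store(&affinity_dirty, 1);
    }

    /* Thread side: applies the mask to itself, where sleeping is allowed. */
    static void *irq_thread(void *arg)
    {
            for (int i = 0; i < 5; i++) {
                    if (atomic_exchange(&affinity_dirty, 0)) {
                            cpu_set_t mask;

                            pthread_mutex_lock(&mask_lock);
                            mask = wanted_mask;
                            pthread_mutex_unlock(&mask_lock);
                            pthread_setaffinity_np(pthread_self(),
                                                   sizeof(mask), &mask);
                            printf("affinity re-applied by the thread itself\n");
                    }
                    usleep(1000);   /* stand-in for waiting on the next interrupt */
            }
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, irq_thread, NULL);
            request_affinity(0);
            pthread_join(t, NULL);
            return 0;
    }

When the kernel variant cannot allocate its scratch cpumask it simply re-sets IRQTF_AFFINITY and retries on the next wakeup, which this sketch omits.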
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index cfe767ca1545..fcb6c96f2627 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -45,7 +45,7 @@ void move_masked_irq(int irq)
45 < nr_cpu_ids)) 45 < nr_cpu_ids))
46 if (!desc->chip->set_affinity(irq, desc->pending_mask)) { 46 if (!desc->chip->set_affinity(irq, desc->pending_mask)) {
47 cpumask_copy(desc->affinity, desc->pending_mask); 47 cpumask_copy(desc->affinity, desc->pending_mask);
48 irq_set_thread_affinity(desc, desc->pending_mask); 48 irq_set_thread_affinity(desc);
49 } 49 }
50 50
51 cpumask_clear(desc->pending_mask); 51 cpumask_clear(desc->pending_mask);
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 2f69bee57bf2..3fd30197da2e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -107,8 +107,8 @@ out_unlock:
107 107
108struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) 108struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
109{ 109{
110 /* those all static, do move them */ 110 /* those static or target node is -1, do not move them */
111 if (desc->irq < NR_IRQS_LEGACY) 111 if (desc->irq < NR_IRQS_LEGACY || node == -1)
112 return desc; 112 return desc;
113 113
114 if (desc->node != node) 114 if (desc->node != node)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ae1c35201cc8..f336e2107f98 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1228,7 +1228,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
1228 } while (*cur++ == ','); 1228 } while (*cur++ == ',');
1229 1229
1230 if (*crash_size > 0) { 1230 if (*crash_size > 0) {
1231 while (*cur != ' ' && *cur != '@') 1231 while (*cur && *cur != ' ' && *cur != '@')
1232 cur++; 1232 cur++;
1233 if (*cur == '@') { 1233 if (*cur == '@') {
1234 cur++; 1234 cur++;
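
The kexec.c hunk above adds a NUL check so parse_crashkernel_mem() cannot scan past the end of the command line when neither ' ' nor '@' follows the size. A minimal sketch of that bounded scan over a "size@offset"-style string (standalone, not the kernel parser):

    #include <stdio.h>

    /* Advance to the '@' separating size from offset, but never run past
     * the terminating NUL -- the missing check the hunk above adds. */
    static const char *find_offset(const char *cur)
    {
            while (*cur && *cur != ' ' && *cur != '@')
                    cur++;
            return (*cur == '@') ? cur + 1 : NULL;
    }

    int main(void)
    {
            const char *with = find_offset("64M@16M");   /* -> "16M"  */
            const char *without = find_offset("64M");    /* -> NULL, no overrun */

            printf("%s\n", with ? with : "(none)");
            printf("%s\n", without ? without : "(none)");
            return 0;
    }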
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6fe9dc6d1a81..ef177d653b2c 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -686,7 +686,7 @@ int __kprobes register_kprobe(struct kprobe *p)
686 p->addr = addr; 686 p->addr = addr;
687 687
688 preempt_disable(); 688 preempt_disable();
689 if (!__kernel_text_address((unsigned long) p->addr) || 689 if (!kernel_text_address((unsigned long) p->addr) ||
690 in_kprobes_functions((unsigned long) p->addr)) { 690 in_kprobes_functions((unsigned long) p->addr)) {
691 preempt_enable(); 691 preempt_enable();
692 return -EINVAL; 692 return -EINVAL;
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9b1a7de26979..eb8751aa0418 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -180,10 +180,12 @@ EXPORT_SYMBOL(kthread_bind);
180 * @k: thread created by kthread_create(). 180 * @k: thread created by kthread_create().
181 * 181 *
182 * Sets kthread_should_stop() for @k to return true, wakes it, and 182 * Sets kthread_should_stop() for @k to return true, wakes it, and
183 * waits for it to exit. Your threadfn() must not call do_exit() 183 * waits for it to exit. This can also be called after kthread_create()
184 * itself if you use this function! This can also be called after 184 * instead of calling wake_up_process(): the thread will exit without
185 * kthread_create() instead of calling wake_up_process(): the thread 185 * calling threadfn().
186 * will exit without calling threadfn(). 186 *
187 * If threadfn() may call do_exit() itself, the caller must ensure
188 * task_struct can't go away.
187 * 189 *
188 * Returns the result of threadfn(), or %-EINTR if wake_up_process() 190 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
189 * was never called. 191 * was never called.
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa2d2c4..e94caa666dba 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void)
758 &proc_lockdep_stats_operations); 758 &proc_lockdep_stats_operations);
759 759
760#ifdef CONFIG_LOCK_STAT 760#ifdef CONFIG_LOCK_STAT
761 proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations); 761 proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
762 &proc_lock_stat_operations);
762#endif 763#endif
763 764
764 return 0; 765 return 0;
diff --git a/kernel/module.c b/kernel/module.c
index 0a049837008e..fd1411403558 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1068,7 +1068,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
1068{ 1068{
1069 const unsigned long *crc; 1069 const unsigned long *crc;
1070 1070
1071 if (!find_symbol("module_layout", NULL, &crc, true, false)) 1071 if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
1072 &crc, true, false))
1072 BUG(); 1073 BUG();
1073 return check_version(sechdrs, versindex, "module_layout", mod, crc); 1074 return check_version(sechdrs, versindex, "module_layout", mod, crc);
1074} 1075}
diff --git a/kernel/panic.c b/kernel/panic.c
index 984b3ecbd72c..512ab73b0ca3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -301,6 +301,7 @@ int oops_may_print(void)
301 */ 301 */
302void oops_enter(void) 302void oops_enter(void)
303{ 303{
304 tracing_off();
304 /* can't trust the integrity of the kernel anymore: */ 305 /* can't trust the integrity of the kernel anymore: */
305 debug_locks_off(); 306 debug_locks_off();
306 do_oops_enter_exit(); 307 do_oops_enter_exit();
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index a641eb753b8c..b0b20a07f394 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
42static atomic_t nr_counters __read_mostly; 42static atomic_t nr_counters __read_mostly;
43static atomic_t nr_mmap_counters __read_mostly; 43static atomic_t nr_mmap_counters __read_mostly;
44static atomic_t nr_comm_counters __read_mostly; 44static atomic_t nr_comm_counters __read_mostly;
45static atomic_t nr_task_counters __read_mostly;
45 46
46/* 47/*
47 * perf counter paranoia level: 48 * perf counter paranoia level:
@@ -146,6 +147,28 @@ static void put_ctx(struct perf_counter_context *ctx)
146 } 147 }
147} 148}
148 149
150static void unclone_ctx(struct perf_counter_context *ctx)
151{
152 if (ctx->parent_ctx) {
153 put_ctx(ctx->parent_ctx);
154 ctx->parent_ctx = NULL;
155 }
156}
157
158/*
159 * If we inherit counters we want to return the parent counter id
160 * to userspace.
161 */
162static u64 primary_counter_id(struct perf_counter *counter)
163{
164 u64 id = counter->id;
165
166 if (counter->parent)
167 id = counter->parent->id;
168
169 return id;
170}
171
149/* 172/*
150 * Get the perf_counter_context for a task and lock it. 173 * Get the perf_counter_context for a task and lock it.
151 * This has to cope with with the fact that until it is locked, 174 * This has to cope with with the fact that until it is locked,
@@ -1081,7 +1104,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx,
1081 __perf_counter_sync_stat(counter, next_counter); 1104 __perf_counter_sync_stat(counter, next_counter);
1082 1105
1083 counter = list_next_entry(counter, event_entry); 1106 counter = list_next_entry(counter, event_entry);
1084 next_counter = list_next_entry(counter, event_entry); 1107 next_counter = list_next_entry(next_counter, event_entry);
1085 } 1108 }
1086} 1109}
1087 1110
@@ -1288,7 +1311,6 @@ static void perf_counter_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
1288#define MAX_INTERRUPTS (~0ULL) 1311#define MAX_INTERRUPTS (~0ULL)
1289 1312
1290static void perf_log_throttle(struct perf_counter *counter, int enable); 1313static void perf_log_throttle(struct perf_counter *counter, int enable);
1291static void perf_log_period(struct perf_counter *counter, u64 period);
1292 1314
1293static void perf_adjust_period(struct perf_counter *counter, u64 events) 1315static void perf_adjust_period(struct perf_counter *counter, u64 events)
1294{ 1316{
@@ -1307,8 +1329,6 @@ static void perf_adjust_period(struct perf_counter *counter, u64 events)
1307 if (!sample_period) 1329 if (!sample_period)
1308 sample_period = 1; 1330 sample_period = 1;
1309 1331
1310 perf_log_period(counter, sample_period);
1311
1312 hwc->sample_period = sample_period; 1332 hwc->sample_period = sample_period;
1313} 1333}
1314 1334
@@ -1463,10 +1483,8 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
1463 /* 1483 /*
1464 * Unclone this context if we enabled any counter. 1484 * Unclone this context if we enabled any counter.
1465 */ 1485 */
1466 if (enabled && ctx->parent_ctx) { 1486 if (enabled)
1467 put_ctx(ctx->parent_ctx); 1487 unclone_ctx(ctx);
1468 ctx->parent_ctx = NULL;
1469 }
1470 1488
1471 spin_unlock(&ctx->lock); 1489 spin_unlock(&ctx->lock);
1472 1490
@@ -1526,7 +1544,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
1526 1544
1527static struct perf_counter_context *find_get_context(pid_t pid, int cpu) 1545static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1528{ 1546{
1529 struct perf_counter_context *parent_ctx;
1530 struct perf_counter_context *ctx; 1547 struct perf_counter_context *ctx;
1531 struct perf_cpu_context *cpuctx; 1548 struct perf_cpu_context *cpuctx;
1532 struct task_struct *task; 1549 struct task_struct *task;
@@ -1586,11 +1603,7 @@ static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
1586 retry: 1603 retry:
1587 ctx = perf_lock_task_context(task, &flags); 1604 ctx = perf_lock_task_context(task, &flags);
1588 if (ctx) { 1605 if (ctx) {
1589 parent_ctx = ctx->parent_ctx; 1606 unclone_ctx(ctx);
1590 if (parent_ctx) {
1591 put_ctx(parent_ctx);
1592 ctx->parent_ctx = NULL; /* no longer a clone */
1593 }
1594 spin_unlock_irqrestore(&ctx->lock, flags); 1607 spin_unlock_irqrestore(&ctx->lock, flags);
1595 } 1608 }
1596 1609
@@ -1642,6 +1655,8 @@ static void free_counter(struct perf_counter *counter)
1642 atomic_dec(&nr_mmap_counters); 1655 atomic_dec(&nr_mmap_counters);
1643 if (counter->attr.comm) 1656 if (counter->attr.comm)
1644 atomic_dec(&nr_comm_counters); 1657 atomic_dec(&nr_comm_counters);
1658 if (counter->attr.task)
1659 atomic_dec(&nr_task_counters);
1645 } 1660 }
1646 1661
1647 if (counter->destroy) 1662 if (counter->destroy)
@@ -1676,6 +1691,18 @@ static int perf_release(struct inode *inode, struct file *file)
1676 return 0; 1691 return 0;
1677} 1692}
1678 1693
1694static u64 perf_counter_read_tree(struct perf_counter *counter)
1695{
1696 struct perf_counter *child;
1697 u64 total = 0;
1698
1699 total += perf_counter_read(counter);
1700 list_for_each_entry(child, &counter->child_list, child_list)
1701 total += perf_counter_read(child);
1702
1703 return total;
1704}
1705
1679/* 1706/*
1680 * Read the performance counter - simple non blocking version for now 1707 * Read the performance counter - simple non blocking version for now
1681 */ 1708 */
@@ -1695,7 +1722,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1695 1722
1696 WARN_ON_ONCE(counter->ctx->parent_ctx); 1723 WARN_ON_ONCE(counter->ctx->parent_ctx);
1697 mutex_lock(&counter->child_mutex); 1724 mutex_lock(&counter->child_mutex);
1698 values[0] = perf_counter_read(counter); 1725 values[0] = perf_counter_read_tree(counter);
1699 n = 1; 1726 n = 1;
1700 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) 1727 if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1701 values[n++] = counter->total_time_enabled + 1728 values[n++] = counter->total_time_enabled +
@@ -1704,7 +1731,7 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
1704 values[n++] = counter->total_time_running + 1731 values[n++] = counter->total_time_running +
1705 atomic64_read(&counter->child_total_time_running); 1732 atomic64_read(&counter->child_total_time_running);
1706 if (counter->attr.read_format & PERF_FORMAT_ID) 1733 if (counter->attr.read_format & PERF_FORMAT_ID)
1707 values[n++] = counter->id; 1734 values[n++] = primary_counter_id(counter);
1708 mutex_unlock(&counter->child_mutex); 1735 mutex_unlock(&counter->child_mutex);
1709 1736
1710 if (count < n * sizeof(u64)) 1737 if (count < n * sizeof(u64))
@@ -1811,8 +1838,6 @@ static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
1811 1838
1812 counter->attr.sample_freq = value; 1839 counter->attr.sample_freq = value;
1813 } else { 1840 } else {
1814 perf_log_period(counter, value);
1815
1816 counter->attr.sample_period = value; 1841 counter->attr.sample_period = value;
1817 counter->hw.sample_period = value; 1842 counter->hw.sample_period = value;
1818 } 1843 }
@@ -2661,10 +2686,14 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2661 if (sample_type & PERF_SAMPLE_ID) 2686 if (sample_type & PERF_SAMPLE_ID)
2662 header.size += sizeof(u64); 2687 header.size += sizeof(u64);
2663 2688
2689 if (sample_type & PERF_SAMPLE_STREAM_ID)
2690 header.size += sizeof(u64);
2691
2664 if (sample_type & PERF_SAMPLE_CPU) { 2692 if (sample_type & PERF_SAMPLE_CPU) {
2665 header.size += sizeof(cpu_entry); 2693 header.size += sizeof(cpu_entry);
2666 2694
2667 cpu_entry.cpu = raw_smp_processor_id(); 2695 cpu_entry.cpu = raw_smp_processor_id();
2696 cpu_entry.reserved = 0;
2668 } 2697 }
2669 2698
2670 if (sample_type & PERF_SAMPLE_PERIOD) 2699 if (sample_type & PERF_SAMPLE_PERIOD)
@@ -2685,6 +2714,18 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2685 header.size += sizeof(u64); 2714 header.size += sizeof(u64);
2686 } 2715 }
2687 2716
2717 if (sample_type & PERF_SAMPLE_RAW) {
2718 int size = sizeof(u32);
2719
2720 if (data->raw)
2721 size += data->raw->size;
2722 else
2723 size += sizeof(u32);
2724
2725 WARN_ON_ONCE(size & (sizeof(u64)-1));
2726 header.size += size;
2727 }
2728
2688 ret = perf_output_begin(&handle, counter, header.size, nmi, 1); 2729 ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
2689 if (ret) 2730 if (ret)
2690 return; 2731 return;
@@ -2703,7 +2744,13 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2703 if (sample_type & PERF_SAMPLE_ADDR) 2744 if (sample_type & PERF_SAMPLE_ADDR)
2704 perf_output_put(&handle, data->addr); 2745 perf_output_put(&handle, data->addr);
2705 2746
2706 if (sample_type & PERF_SAMPLE_ID) 2747 if (sample_type & PERF_SAMPLE_ID) {
2748 u64 id = primary_counter_id(counter);
2749
2750 perf_output_put(&handle, id);
2751 }
2752
2753 if (sample_type & PERF_SAMPLE_STREAM_ID)
2707 perf_output_put(&handle, counter->id); 2754 perf_output_put(&handle, counter->id);
2708 2755
2709 if (sample_type & PERF_SAMPLE_CPU) 2756 if (sample_type & PERF_SAMPLE_CPU)
@@ -2726,7 +2773,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2726 if (sub != counter) 2773 if (sub != counter)
2727 sub->pmu->read(sub); 2774 sub->pmu->read(sub);
2728 2775
2729 group_entry.id = sub->id; 2776 group_entry.id = primary_counter_id(sub);
2730 group_entry.counter = atomic64_read(&sub->count); 2777 group_entry.counter = atomic64_read(&sub->count);
2731 2778
2732 perf_output_put(&handle, group_entry); 2779 perf_output_put(&handle, group_entry);
@@ -2742,6 +2789,22 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
2742 } 2789 }
2743 } 2790 }
2744 2791
2792 if (sample_type & PERF_SAMPLE_RAW) {
2793 if (data->raw) {
2794 perf_output_put(&handle, data->raw->size);
2795 perf_output_copy(&handle, data->raw->data, data->raw->size);
2796 } else {
2797 struct {
2798 u32 size;
2799 u32 data;
2800 } raw = {
2801 .size = sizeof(u32),
2802 .data = 0,
2803 };
2804 perf_output_put(&handle, raw);
2805 }
2806 }
2807
2745 perf_output_end(&handle); 2808 perf_output_end(&handle);
2746} 2809}
2747 2810
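
The PERF_SAMPLE_RAW hunks above emit the raw payload as a u32 length followed by the data, and the WARN_ON_ONCE(size & (sizeof(u64)-1)) insists the whole record stays u64-aligned. A small sketch of how a producer could pad its payload to satisfy that invariant; this helper is an assumption for illustration, not a kernel function:

    #include <inttypes.h>
    #include <stdio.h>

    /*
     * Pad a raw payload so the u32 size header plus the data is a multiple
     * of u64 -- the alignment checked by the WARN_ON_ONCE() above.
     */
    static uint32_t raw_padded_size(uint32_t payload)
    {
            uint32_t total = sizeof(uint32_t) + payload;   /* header + data */
            uint32_t mask = sizeof(uint64_t) - 1;

            return ((total + mask) & ~mask) - (uint32_t)sizeof(uint32_t);
    }

    int main(void)
    {
            printf("%u\n", raw_padded_size(13));  /* 20: 4 + 20 == 24, 8-aligned */
            printf("%u\n", raw_padded_size(4));   /*  4: already aligned         */
            return 0;
    }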
@@ -2786,15 +2849,8 @@ perf_counter_read_event(struct perf_counter *counter,
2786 } 2849 }
2787 2850
2788 if (counter->attr.read_format & PERF_FORMAT_ID) { 2851 if (counter->attr.read_format & PERF_FORMAT_ID) {
2789 u64 id;
2790
2791 event.header.size += sizeof(u64); 2852 event.header.size += sizeof(u64);
2792 if (counter->parent) 2853 event.format[i++] = primary_counter_id(counter);
2793 id = counter->parent->id;
2794 else
2795 id = counter->id;
2796
2797 event.format[i++] = id;
2798 } 2854 }
2799 2855
2800 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0); 2856 ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
@@ -2806,48 +2862,56 @@ perf_counter_read_event(struct perf_counter *counter,
2806} 2862}
2807 2863
2808/* 2864/*
2809 * fork tracking 2865 * task tracking -- fork/exit
2866 *
2867 * enabled by: attr.comm | attr.mmap | attr.task
2810 */ 2868 */
2811 2869
2812struct perf_fork_event { 2870struct perf_task_event {
2813 struct task_struct *task; 2871 struct task_struct *task;
2872 struct perf_counter_context *task_ctx;
2814 2873
2815 struct { 2874 struct {
2816 struct perf_event_header header; 2875 struct perf_event_header header;
2817 2876
2818 u32 pid; 2877 u32 pid;
2819 u32 ppid; 2878 u32 ppid;
2879 u32 tid;
2880 u32 ptid;
2820 } event; 2881 } event;
2821}; 2882};
2822 2883
2823static void perf_counter_fork_output(struct perf_counter *counter, 2884static void perf_counter_task_output(struct perf_counter *counter,
2824 struct perf_fork_event *fork_event) 2885 struct perf_task_event *task_event)
2825{ 2886{
2826 struct perf_output_handle handle; 2887 struct perf_output_handle handle;
2827 int size = fork_event->event.header.size; 2888 int size = task_event->event.header.size;
2828 struct task_struct *task = fork_event->task; 2889 struct task_struct *task = task_event->task;
2829 int ret = perf_output_begin(&handle, counter, size, 0, 0); 2890 int ret = perf_output_begin(&handle, counter, size, 0, 0);
2830 2891
2831 if (ret) 2892 if (ret)
2832 return; 2893 return;
2833 2894
2834 fork_event->event.pid = perf_counter_pid(counter, task); 2895 task_event->event.pid = perf_counter_pid(counter, task);
2835 fork_event->event.ppid = perf_counter_pid(counter, task->real_parent); 2896 task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
2836 2897
2837 perf_output_put(&handle, fork_event->event); 2898 task_event->event.tid = perf_counter_tid(counter, task);
2899 task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
2900
2901 perf_output_put(&handle, task_event->event);
2838 perf_output_end(&handle); 2902 perf_output_end(&handle);
2839} 2903}
2840 2904
2841static int perf_counter_fork_match(struct perf_counter *counter) 2905static int perf_counter_task_match(struct perf_counter *counter)
2842{ 2906{
2843 if (counter->attr.comm || counter->attr.mmap) 2907 if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
2844 return 1; 2908 return 1;
2845 2909
2846 return 0; 2910 return 0;
2847} 2911}
2848 2912
2849static void perf_counter_fork_ctx(struct perf_counter_context *ctx, 2913static void perf_counter_task_ctx(struct perf_counter_context *ctx,
2850 struct perf_fork_event *fork_event) 2914 struct perf_task_event *task_event)
2851{ 2915{
2852 struct perf_counter *counter; 2916 struct perf_counter *counter;
2853 2917
@@ -2856,51 +2920,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
2856 2920
2857 rcu_read_lock(); 2921 rcu_read_lock();
2858 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) { 2922 list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
2859 if (perf_counter_fork_match(counter)) 2923 if (perf_counter_task_match(counter))
2860 perf_counter_fork_output(counter, fork_event); 2924 perf_counter_task_output(counter, task_event);
2861 } 2925 }
2862 rcu_read_unlock(); 2926 rcu_read_unlock();
2863} 2927}
2864 2928
2865static void perf_counter_fork_event(struct perf_fork_event *fork_event) 2929static void perf_counter_task_event(struct perf_task_event *task_event)
2866{ 2930{
2867 struct perf_cpu_context *cpuctx; 2931 struct perf_cpu_context *cpuctx;
2868 struct perf_counter_context *ctx; 2932 struct perf_counter_context *ctx = task_event->task_ctx;
2869 2933
2870 cpuctx = &get_cpu_var(perf_cpu_context); 2934 cpuctx = &get_cpu_var(perf_cpu_context);
2871 perf_counter_fork_ctx(&cpuctx->ctx, fork_event); 2935 perf_counter_task_ctx(&cpuctx->ctx, task_event);
2872 put_cpu_var(perf_cpu_context); 2936 put_cpu_var(perf_cpu_context);
2873 2937
2874 rcu_read_lock(); 2938 rcu_read_lock();
2875 /* 2939 if (!ctx)
2876 * doesn't really matter which of the child contexts the 2940 ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
2877 * events ends up in.
2878 */
2879 ctx = rcu_dereference(current->perf_counter_ctxp);
2880 if (ctx) 2941 if (ctx)
2881 perf_counter_fork_ctx(ctx, fork_event); 2942 perf_counter_task_ctx(ctx, task_event);
2882 rcu_read_unlock(); 2943 rcu_read_unlock();
2883} 2944}
2884 2945
2885void perf_counter_fork(struct task_struct *task) 2946static void perf_counter_task(struct task_struct *task,
2947 struct perf_counter_context *task_ctx,
2948 int new)
2886{ 2949{
2887 struct perf_fork_event fork_event; 2950 struct perf_task_event task_event;
2888 2951
2889 if (!atomic_read(&nr_comm_counters) && 2952 if (!atomic_read(&nr_comm_counters) &&
2890 !atomic_read(&nr_mmap_counters)) 2953 !atomic_read(&nr_mmap_counters) &&
2954 !atomic_read(&nr_task_counters))
2891 return; 2955 return;
2892 2956
2893 fork_event = (struct perf_fork_event){ 2957 task_event = (struct perf_task_event){
2894 .task = task, 2958 .task = task,
2895 .event = { 2959 .task_ctx = task_ctx,
2960 .event = {
2896 .header = { 2961 .header = {
2897 .type = PERF_EVENT_FORK, 2962 .type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
2898 .size = sizeof(fork_event.event), 2963 .misc = 0,
2964 .size = sizeof(task_event.event),
2899 }, 2965 },
2966 /* .pid */
2967 /* .ppid */
2968 /* .tid */
2969 /* .ptid */
2900 }, 2970 },
2901 }; 2971 };
2902 2972
2903 perf_counter_fork_event(&fork_event); 2973 perf_counter_task_event(&task_event);
2974}
2975
2976void perf_counter_fork(struct task_struct *task)
2977{
2978 perf_counter_task(task, NULL, 1);
2904} 2979}
2905 2980
2906/* 2981/*
@@ -2968,8 +3043,10 @@ static void perf_counter_comm_event(struct perf_comm_event *comm_event)
2968 struct perf_cpu_context *cpuctx; 3043 struct perf_cpu_context *cpuctx;
2969 struct perf_counter_context *ctx; 3044 struct perf_counter_context *ctx;
2970 unsigned int size; 3045 unsigned int size;
2971 char *comm = comm_event->task->comm; 3046 char comm[TASK_COMM_LEN];
2972 3047
3048 memset(comm, 0, sizeof(comm));
3049 strncpy(comm, comm_event->task->comm, sizeof(comm));
2973 size = ALIGN(strlen(comm)+1, sizeof(u64)); 3050 size = ALIGN(strlen(comm)+1, sizeof(u64));
2974 3051
2975 comm_event->comm = comm; 3052 comm_event->comm = comm;
@@ -3004,8 +3081,16 @@ void perf_counter_comm(struct task_struct *task)
3004 3081
3005 comm_event = (struct perf_comm_event){ 3082 comm_event = (struct perf_comm_event){
3006 .task = task, 3083 .task = task,
3084 /* .comm */
3085 /* .comm_size */
3007 .event = { 3086 .event = {
3008 .header = { .type = PERF_EVENT_COMM, }, 3087 .header = {
3088 .type = PERF_EVENT_COMM,
3089 .misc = 0,
3090 /* .size */
3091 },
3092 /* .pid */
3093 /* .tid */
3009 }, 3094 },
3010 }; 3095 };
3011 3096
@@ -3088,8 +3173,15 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3088 char *buf = NULL; 3173 char *buf = NULL;
3089 const char *name; 3174 const char *name;
3090 3175
3176 memset(tmp, 0, sizeof(tmp));
3177
3091 if (file) { 3178 if (file) {
3092 buf = kzalloc(PATH_MAX, GFP_KERNEL); 3179 /*
3180 * d_path works from the end of the buffer backwards, so we
3181 * need to add enough zero bytes after the string to handle
3182 * the 64bit alignment we do later.
3183 */
3184 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
3093 if (!buf) { 3185 if (!buf) {
3094 name = strncpy(tmp, "//enomem", sizeof(tmp)); 3186 name = strncpy(tmp, "//enomem", sizeof(tmp));
3095 goto got_name; 3187 goto got_name;
@@ -3100,9 +3192,11 @@ static void perf_counter_mmap_event(struct perf_mmap_event *mmap_event)
3100 goto got_name; 3192 goto got_name;
3101 } 3193 }
3102 } else { 3194 } else {
3103 name = arch_vma_name(mmap_event->vma); 3195 if (arch_vma_name(mmap_event->vma)) {
3104 if (name) 3196 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
3197 sizeof(tmp));
3105 goto got_name; 3198 goto got_name;
3199 }
3106 3200
3107 if (!vma->vm_mm) { 3201 if (!vma->vm_mm) {
3108 name = strncpy(tmp, "[vdso]", sizeof(tmp)); 3202 name = strncpy(tmp, "[vdso]", sizeof(tmp));
@@ -3147,8 +3241,16 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3147 3241
3148 mmap_event = (struct perf_mmap_event){ 3242 mmap_event = (struct perf_mmap_event){
3149 .vma = vma, 3243 .vma = vma,
3244 /* .file_name */
3245 /* .file_size */
3150 .event = { 3246 .event = {
3151 .header = { .type = PERF_EVENT_MMAP, }, 3247 .header = {
3248 .type = PERF_EVENT_MMAP,
3249 .misc = 0,
3250 /* .size */
3251 },
3252 /* .pid */
3253 /* .tid */
3152 .start = vma->vm_start, 3254 .start = vma->vm_start,
3153 .len = vma->vm_end - vma->vm_start, 3255 .len = vma->vm_end - vma->vm_start,
3154 .pgoff = vma->vm_pgoff, 3256 .pgoff = vma->vm_pgoff,
@@ -3159,49 +3261,6 @@ void __perf_counter_mmap(struct vm_area_struct *vma)
3159} 3261}
3160 3262
3161/* 3263/*
3162 * Log sample_period changes so that analyzing tools can re-normalize the
3163 * event flow.
3164 */
3165
3166struct freq_event {
3167 struct perf_event_header header;
3168 u64 time;
3169 u64 id;
3170 u64 period;
3171};
3172
3173static void perf_log_period(struct perf_counter *counter, u64 period)
3174{
3175 struct perf_output_handle handle;
3176 struct freq_event event;
3177 int ret;
3178
3179 if (counter->hw.sample_period == period)
3180 return;
3181
3182 if (counter->attr.sample_type & PERF_SAMPLE_PERIOD)
3183 return;
3184
3185 event = (struct freq_event) {
3186 .header = {
3187 .type = PERF_EVENT_PERIOD,
3188 .misc = 0,
3189 .size = sizeof(event),
3190 },
3191 .time = sched_clock(),
3192 .id = counter->id,
3193 .period = period,
3194 };
3195
3196 ret = perf_output_begin(&handle, counter, sizeof(event), 1, 0);
3197 if (ret)
3198 return;
3199
3200 perf_output_put(&handle, event);
3201 perf_output_end(&handle);
3202}
3203
3204/*
3205 * IRQ throttle logging 3264 * IRQ throttle logging
3206 */ 3265 */
3207 3266
@@ -3214,16 +3273,21 @@ static void perf_log_throttle(struct perf_counter *counter, int enable)
3214 struct perf_event_header header; 3273 struct perf_event_header header;
3215 u64 time; 3274 u64 time;
3216 u64 id; 3275 u64 id;
3276 u64 stream_id;
3217 } throttle_event = { 3277 } throttle_event = {
3218 .header = { 3278 .header = {
3219 .type = PERF_EVENT_THROTTLE + 1, 3279 .type = PERF_EVENT_THROTTLE,
3220 .misc = 0, 3280 .misc = 0,
3221 .size = sizeof(throttle_event), 3281 .size = sizeof(throttle_event),
3222 }, 3282 },
3223 .time = sched_clock(), 3283 .time = sched_clock(),
3224 .id = counter->id, 3284 .id = primary_counter_id(counter),
3285 .stream_id = counter->id,
3225 }; 3286 };
3226 3287
3288 if (enable)
3289 throttle_event.header.type = PERF_EVENT_UNTHROTTLE;
3290
3227 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0); 3291 ret = perf_output_begin(&handle, counter, sizeof(throttle_event), 1, 0);
3228 if (ret) 3292 if (ret)
3229 return; 3293 return;
@@ -3300,87 +3364,81 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
3300 * Generic software counter infrastructure 3364 * Generic software counter infrastructure
3301 */ 3365 */
3302 3366
3303static void perf_swcounter_update(struct perf_counter *counter) 3367/*
3368 * We directly increment counter->count and keep a second value in
3369 * counter->hw.period_left to count intervals. This period counter
3370 * is kept in the range [-sample_period, 0] so that we can use the
3371 * sign as trigger.
3372 */
3373
3374static u64 perf_swcounter_set_period(struct perf_counter *counter)
3304{ 3375{
3305 struct hw_perf_counter *hwc = &counter->hw; 3376 struct hw_perf_counter *hwc = &counter->hw;
3306 u64 prev, now; 3377 u64 period = hwc->last_period;
3307 s64 delta; 3378 u64 nr, offset;
3379 s64 old, val;
3380
3381 hwc->last_period = hwc->sample_period;
3308 3382
3309again: 3383again:
3310 prev = atomic64_read(&hwc->prev_count); 3384 old = val = atomic64_read(&hwc->period_left);
3311 now = atomic64_read(&hwc->count); 3385 if (val < 0)
3312 if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev) 3386 return 0;
3313 goto again;
3314 3387
3315 delta = now - prev; 3388 nr = div64_u64(period + val, period);
3389 offset = nr * period;
3390 val -= offset;
3391 if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
3392 goto again;
3316 3393
3317 atomic64_add(delta, &counter->count); 3394 return nr;
3318 atomic64_sub(delta, &hwc->period_left);
3319} 3395}
3320 3396
3321static void perf_swcounter_set_period(struct perf_counter *counter) 3397static void perf_swcounter_overflow(struct perf_counter *counter,
3398 int nmi, struct perf_sample_data *data)
3322{ 3399{
3323 struct hw_perf_counter *hwc = &counter->hw; 3400 struct hw_perf_counter *hwc = &counter->hw;
3324 s64 left = atomic64_read(&hwc->period_left); 3401 u64 overflow;
3325 s64 period = hwc->sample_period;
3326 3402
3327 if (unlikely(left <= -period)) { 3403 data->period = counter->hw.last_period;
3328 left = period; 3404 overflow = perf_swcounter_set_period(counter);
3329 atomic64_set(&hwc->period_left, left);
3330 hwc->last_period = period;
3331 }
3332 3405
3333 if (unlikely(left <= 0)) { 3406 if (hwc->interrupts == MAX_INTERRUPTS)
3334 left += period; 3407 return;
3335 atomic64_add(period, &hwc->period_left);
3336 hwc->last_period = period;
3337 }
3338 3408
3339 atomic64_set(&hwc->prev_count, -left); 3409 for (; overflow; overflow--) {
3340 atomic64_set(&hwc->count, -left); 3410 if (perf_counter_overflow(counter, nmi, data)) {
3411 /*
3412 * We inhibit the overflow from happening when
3413 * hwc->interrupts == MAX_INTERRUPTS.
3414 */
3415 break;
3416 }
3417 }
3341} 3418}
3342 3419
3343static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer) 3420static void perf_swcounter_unthrottle(struct perf_counter *counter)
3344{ 3421{
3345 enum hrtimer_restart ret = HRTIMER_RESTART;
3346 struct perf_sample_data data;
3347 struct perf_counter *counter;
3348 u64 period;
3349
3350 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3351 counter->pmu->read(counter);
3352
3353 data.addr = 0;
3354 data.regs = get_irq_regs();
3355 /* 3422 /*
3356 * In case we exclude kernel IPs or are somehow not in interrupt 3423 * Nothing to do, we already reset hwc->interrupts.
3357 * context, provide the next best thing, the user IP.
3358 */ 3424 */
3359 if ((counter->attr.exclude_kernel || !data.regs) && 3425}
3360 !counter->attr.exclude_user)
3361 data.regs = task_pt_regs(current);
3362 3426
3363 if (data.regs) { 3427static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3364 if (perf_counter_overflow(counter, 0, &data)) 3428 int nmi, struct perf_sample_data *data)
3365 ret = HRTIMER_NORESTART; 3429{
3366 } 3430 struct hw_perf_counter *hwc = &counter->hw;
3367 3431
3368 period = max_t(u64, 10000, counter->hw.sample_period); 3432 atomic64_add(nr, &counter->count);
3369 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3370 3433
3371 return ret; 3434 if (!hwc->sample_period)
3372} 3435 return;
3373 3436
3374static void perf_swcounter_overflow(struct perf_counter *counter, 3437 if (!data->regs)
3375 int nmi, struct perf_sample_data *data) 3438 return;
3376{
3377 data->period = counter->hw.last_period;
3378 3439
3379 perf_swcounter_update(counter); 3440 if (!atomic64_add_negative(nr, &hwc->period_left))
3380 perf_swcounter_set_period(counter); 3441 perf_swcounter_overflow(counter, nmi, data);
3381 if (perf_counter_overflow(counter, nmi, data))
3382 /* soft-disable the counter */
3383 ;
3384} 3442}
3385 3443
3386static int perf_swcounter_is_counting(struct perf_counter *counter) 3444static int perf_swcounter_is_counting(struct perf_counter *counter)
@@ -3444,15 +3502,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
3444 return 1; 3502 return 1;
3445} 3503}
3446 3504
3447static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
3448 int nmi, struct perf_sample_data *data)
3449{
3450 int neg = atomic64_add_negative(nr, &counter->hw.count);
3451
3452 if (counter->hw.sample_period && !neg && data->regs)
3453 perf_swcounter_overflow(counter, nmi, data);
3454}
3455
3456static void perf_swcounter_ctx_event(struct perf_counter_context *ctx, 3505static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
3457 enum perf_type_id type, 3506 enum perf_type_id type,
3458 u32 event, u64 nr, int nmi, 3507 u32 event, u64 nr, int nmi,
@@ -3531,27 +3580,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
3531 3580
3532static void perf_swcounter_read(struct perf_counter *counter) 3581static void perf_swcounter_read(struct perf_counter *counter)
3533{ 3582{
3534 perf_swcounter_update(counter);
3535} 3583}
3536 3584
3537static int perf_swcounter_enable(struct perf_counter *counter) 3585static int perf_swcounter_enable(struct perf_counter *counter)
3538{ 3586{
3539 perf_swcounter_set_period(counter); 3587 struct hw_perf_counter *hwc = &counter->hw;
3588
3589 if (hwc->sample_period) {
3590 hwc->last_period = hwc->sample_period;
3591 perf_swcounter_set_period(counter);
3592 }
3540 return 0; 3593 return 0;
3541} 3594}
3542 3595
3543static void perf_swcounter_disable(struct perf_counter *counter) 3596static void perf_swcounter_disable(struct perf_counter *counter)
3544{ 3597{
3545 perf_swcounter_update(counter);
3546} 3598}
3547 3599
3548static const struct pmu perf_ops_generic = { 3600static const struct pmu perf_ops_generic = {
3549 .enable = perf_swcounter_enable, 3601 .enable = perf_swcounter_enable,
3550 .disable = perf_swcounter_disable, 3602 .disable = perf_swcounter_disable,
3551 .read = perf_swcounter_read, 3603 .read = perf_swcounter_read,
3604 .unthrottle = perf_swcounter_unthrottle,
3552}; 3605};
3553 3606
3554/* 3607/*
3608 * hrtimer based swcounter callback
3609 */
3610
3611static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
3612{
3613 enum hrtimer_restart ret = HRTIMER_RESTART;
3614 struct perf_sample_data data;
3615 struct perf_counter *counter;
3616 u64 period;
3617
3618 counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
3619 counter->pmu->read(counter);
3620
3621 data.addr = 0;
3622 data.regs = get_irq_regs();
3623 /*
3624 * In case we exclude kernel IPs or are somehow not in interrupt
3625 * context, provide the next best thing, the user IP.
3626 */
3627 if ((counter->attr.exclude_kernel || !data.regs) &&
3628 !counter->attr.exclude_user)
3629 data.regs = task_pt_regs(current);
3630
3631 if (data.regs) {
3632 if (perf_counter_overflow(counter, 0, &data))
3633 ret = HRTIMER_NORESTART;
3634 }
3635
3636 period = max_t(u64, 10000, counter->hw.sample_period);
3637 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
3638
3639 return ret;
3640}
3641
3642/*
3555 * Software counter: cpu wall time clock 3643 * Software counter: cpu wall time clock
3556 */ 3644 */
3557 3645
@@ -3668,17 +3756,24 @@ static const struct pmu perf_ops_task_clock = {
3668}; 3756};
3669 3757
3670#ifdef CONFIG_EVENT_PROFILE 3758#ifdef CONFIG_EVENT_PROFILE
3671void perf_tpcounter_event(int event_id) 3759void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
3760 int entry_size)
3672{ 3761{
3762 struct perf_raw_record raw = {
3763 .size = entry_size,
3764 .data = record,
3765 };
3766
3673 struct perf_sample_data data = { 3767 struct perf_sample_data data = {
3674 .regs = get_irq_regs(); 3768 .regs = get_irq_regs(),
3675 .addr = 0, 3769 .addr = addr,
3770 .raw = &raw,
3676 }; 3771 };
3677 3772
3678 if (!data.regs) 3773 if (!data.regs)
3679 data.regs = task_pt_regs(current); 3774 data.regs = task_pt_regs(current);
3680 3775
3681 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data); 3776 do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
3682} 3777}
3683EXPORT_SYMBOL_GPL(perf_tpcounter_event); 3778EXPORT_SYMBOL_GPL(perf_tpcounter_event);
3684 3779
@@ -3687,16 +3782,20 @@ extern void ftrace_profile_disable(int);
3687 3782
3688static void tp_perf_counter_destroy(struct perf_counter *counter) 3783static void tp_perf_counter_destroy(struct perf_counter *counter)
3689{ 3784{
3690 ftrace_profile_disable(perf_event_id(&counter->attr)); 3785 ftrace_profile_disable(counter->attr.config);
3691} 3786}
3692 3787
3693static const struct pmu *tp_perf_counter_init(struct perf_counter *counter) 3788static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
3694{ 3789{
3695 int event_id = perf_event_id(&counter->attr); 3790 /*
3696 int ret; 3791 * Raw tracepoint data is a severe data leak, only allow root to
3792 * have these.
3793 */
3794 if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
3795 !capable(CAP_SYS_ADMIN))
3796 return ERR_PTR(-EPERM);
3697 3797
3698 ret = ftrace_profile_enable(event_id); 3798 if (ftrace_profile_enable(counter->attr.config))
3699 if (ret)
3700 return NULL; 3799 return NULL;
3701 3800
3702 counter->destroy = tp_perf_counter_destroy; 3801 counter->destroy = tp_perf_counter_destroy;
@@ -3874,6 +3973,8 @@ done:
3874 atomic_inc(&nr_mmap_counters); 3973 atomic_inc(&nr_mmap_counters);
3875 if (counter->attr.comm) 3974 if (counter->attr.comm)
3876 atomic_inc(&nr_comm_counters); 3975 atomic_inc(&nr_comm_counters);
3976 if (counter->attr.task)
3977 atomic_inc(&nr_task_counters);
3877 } 3978 }
3878 3979
3879 return counter; 3980 return counter;
@@ -4235,8 +4336,10 @@ void perf_counter_exit_task(struct task_struct *child)
4235 struct perf_counter_context *child_ctx; 4336 struct perf_counter_context *child_ctx;
4236 unsigned long flags; 4337 unsigned long flags;
4237 4338
4238 if (likely(!child->perf_counter_ctxp)) 4339 if (likely(!child->perf_counter_ctxp)) {
4340 perf_counter_task(child, NULL, 0);
4239 return; 4341 return;
4342 }
4240 4343
4241 local_irq_save(flags); 4344 local_irq_save(flags);
4242 /* 4345 /*
@@ -4255,17 +4358,20 @@ void perf_counter_exit_task(struct task_struct *child)
4255 */ 4358 */
4256 spin_lock(&child_ctx->lock); 4359 spin_lock(&child_ctx->lock);
4257 child->perf_counter_ctxp = NULL; 4360 child->perf_counter_ctxp = NULL;
4258 if (child_ctx->parent_ctx) { 4361 /*
4259 /* 4362 * If this context is a clone; unclone it so it can't get
4260 * This context is a clone; unclone it so it can't get 4363 * swapped to another process while we're removing all
4261 * swapped to another process while we're removing all 4364 * the counters from it.
4262 * the counters from it. 4365 */
4263 */ 4366 unclone_ctx(child_ctx);
4264 put_ctx(child_ctx->parent_ctx); 4367 spin_unlock_irqrestore(&child_ctx->lock, flags);
4265 child_ctx->parent_ctx = NULL; 4368
4266 } 4369 /*
4267 spin_unlock(&child_ctx->lock); 4370 * Report the task dead after unscheduling the counters so that we
4268 local_irq_restore(flags); 4371 * won't get any samples after PERF_EVENT_EXIT. We can however still
4372 * get a few PERF_EVENT_READ events.
4373 */
4374 perf_counter_task(child, child_ctx, 0);
4269 4375
4270 /* 4376 /*
4271 * We can recurse on the same lock type through: 4377 * We can recurse on the same lock type through:
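Note on the reworked software-counter sampling above: perf_swcounter_add() now accumulates into counter->count and calls perf_swcounter_overflow() when hwc->period_left, which perf_swcounter_set_period() keeps negative, climbs back up to zero. The following stand-alone sketch shows only that accounting idea (plain user-space C; the struct, the names, and the simple re-arm loop are illustrative simplifications, not the kernel's atomic64/cmpxchg implementation):

#include <stdio.h>
#include <stdint.h>

struct sw_counter {
	int64_t count;          /* total events observed */
	int64_t period_left;    /* kept negative; reaching >= 0 means "sample" */
	int64_t sample_period;
};

/* Re-arm the window: push period_left back below zero, keeping any
 * overshoot so samples average one per sample_period events. */
static void set_period(struct sw_counter *c)
{
	while (c->period_left >= 0)
		c->period_left -= c->sample_period;
}

static void add_events(struct sw_counter *c, int64_t nr)
{
	c->count += nr;
	if (!c->sample_period)
		return;
	c->period_left += nr;
	if (c->period_left >= 0) {              /* i.e. the add went non-negative */
		printf("sample at count=%lld\n", (long long)c->count);
		set_period(c);
	}
}

int main(void)
{
	struct sw_counter c = { .sample_period = 10 };

	set_period(&c);                 /* arms period_left at -10 */
	for (int i = 0; i < 35; i++)
		add_events(&c, 1);      /* samples fire at counts 10, 20, 30 */
	return 0;
}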
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bece7c0b67b2..e33a21cb9407 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
521} 521}
522void posix_cpu_timers_exit_group(struct task_struct *tsk) 522void posix_cpu_timers_exit_group(struct task_struct *tsk)
523{ 523{
524 struct task_cputime cputime; 524 struct signal_struct *const sig = tsk->signal;
525 525
526 thread_group_cputimer(tsk, &cputime);
527 cleanup_timers(tsk->signal->cpu_timers, 526 cleanup_timers(tsk->signal->cpu_timers,
528 cputime.utime, cputime.stime, cputime.sum_exec_runtime); 527 cputime_add(tsk->utime, sig->utime),
528 cputime_add(tsk->stime, sig->stime),
529 tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
529} 530}
530 531
531static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) 532static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 052ec4d195c7..d089d052c4a9 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer)
202 return -EOPNOTSUPP; 202 return -EOPNOTSUPP;
203} 203}
204 204
205static int no_nsleep(const clockid_t which_clock, int flags,
206 struct timespec *tsave, struct timespec __user *rmtp)
207{
208 return -EOPNOTSUPP;
209}
210
205/* 211/*
206 * Return nonzero if we know a priori this clockid_t value is bogus. 212 * Return nonzero if we know a priori this clockid_t value is bogus.
207 */ 213 */
@@ -254,6 +260,7 @@ static __init int init_posix_timers(void)
254 .clock_get = posix_get_monotonic_raw, 260 .clock_get = posix_get_monotonic_raw,
255 .clock_set = do_posix_clock_nosettime, 261 .clock_set = do_posix_clock_nosettime,
256 .timer_create = no_timer_create, 262 .timer_create = no_timer_create,
263 .nsleep = no_nsleep,
257 }; 264 };
258 265
259 register_posix_clock(CLOCK_REALTIME, &clock_realtime); 266 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
diff --git a/kernel/profile.c b/kernel/profile.c
index 69911b5745eb..419250ebec4d 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -117,11 +117,12 @@ int __ref profile_init(void)
117 117
118 cpumask_copy(prof_cpu_mask, cpu_possible_mask); 118 cpumask_copy(prof_cpu_mask, cpu_possible_mask);
119 119
120 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL); 120 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL|__GFP_NOWARN);
121 if (prof_buffer) 121 if (prof_buffer)
122 return 0; 122 return 0;
123 123
124 prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO); 124 prof_buffer = alloc_pages_exact(buffer_bytes,
125 GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN);
125 if (prof_buffer) 126 if (prof_buffer)
126 return 0; 127 return 0;
127 128
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index fcd107a78c5a..29bd4baf9e75 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1039,16 +1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
1039 if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) { 1039 if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
1040 /* We got the lock for task. */ 1040 /* We got the lock for task. */
1041 debug_rt_mutex_lock(lock); 1041 debug_rt_mutex_lock(lock);
1042
1043 rt_mutex_set_owner(lock, task, 0); 1042 rt_mutex_set_owner(lock, task, 0);
1044 1043 spin_unlock(&lock->wait_lock);
1045 rt_mutex_deadlock_account_lock(lock, task); 1044 rt_mutex_deadlock_account_lock(lock, task);
1046 return 1; 1045 return 1;
1047 } 1046 }
1048 1047
1049 ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock); 1048 ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
1050 1049
1051
1052 if (ret && !waiter->task) { 1050 if (ret && !waiter->task) {
1053 /* 1051 /*
1054 * Reset the return value. We might have 1052 * Reset the return value. We might have
diff --git a/kernel/sched.c b/kernel/sched.c
index 98972d366fdc..1b59e265273b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7289,6 +7289,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
7289static void calc_global_load_remove(struct rq *rq) 7289static void calc_global_load_remove(struct rq *rq)
7290{ 7290{
7291 atomic_long_sub(rq->calc_load_active, &calc_load_tasks); 7291 atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
7292 rq->calc_load_active = 0;
7292} 7293}
7293#endif /* CONFIG_HOTPLUG_CPU */ 7294#endif /* CONFIG_HOTPLUG_CPU */
7294 7295
@@ -7515,6 +7516,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7515 task_rq_unlock(rq, &flags); 7516 task_rq_unlock(rq, &flags);
7516 get_task_struct(p); 7517 get_task_struct(p);
7517 cpu_rq(cpu)->migration_thread = p; 7518 cpu_rq(cpu)->migration_thread = p;
7519 rq->calc_load_update = calc_load_update;
7518 break; 7520 break;
7519 7521
7520 case CPU_ONLINE: 7522 case CPU_ONLINE:
@@ -7525,8 +7527,6 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
7525 /* Update our root-domain */ 7527 /* Update our root-domain */
7526 rq = cpu_rq(cpu); 7528 rq = cpu_rq(cpu);
7527 spin_lock_irqsave(&rq->lock, flags); 7529 spin_lock_irqsave(&rq->lock, flags);
7528 rq->calc_load_update = calc_load_update;
7529 rq->calc_load_active = 0;
7530 if (rq->rd) { 7530 if (rq->rd) {
7531 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); 7531 BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
7532 7532
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index e6c251790dde..d014efbf947a 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
81 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) 81 if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
82 continue; 82 continue;
83 83
84 if (lowest_mask) 84 if (lowest_mask) {
85 cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); 85 cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
86
87 /*
88 * We have to ensure that we have at least one bit
89 * still set in the array, since the map could have
90 * been concurrently emptied between the first and
91 * second reads of vec->mask. If we hit this
92 * condition, simply act as though we never hit this
93 * priority level and continue on.
94 */
95 if (cpumask_any(lowest_mask) >= nr_cpu_ids)
96 continue;
97 }
98
86 return 1; 99 return 1;
87 } 100 }
88 101
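The comment added in the hunk above describes a recheck-after-snapshot pattern: vec->mask can be emptied by another CPU between the existence test and the cpumask_and() copy, so the copy is validated before it is trusted. A minimal illustration of the same pattern (plain user-space C with a uint64_t standing in for the cpumask; all names are illustrative):

#include <stdint.h>
#include <stdio.h>

static volatile uint64_t vec_mask = 0x5;    /* updated concurrently elsewhere */

/* Return 1 and fill *lowest if 'allowed' overlaps vec_mask, 0 otherwise. */
static int find_lowest(uint64_t allowed, uint64_t *lowest)
{
	if (!(allowed & vec_mask))          /* first read: any overlap at all? */
		return 0;

	*lowest = allowed & vec_mask;       /* second read: may see fewer bits */
	if (!*lowest)
		return 0;                   /* emptied in between: act as if we never matched */

	return 1;
}

int main(void)
{
	uint64_t lowest;

	if (find_lowest(0x6, &lowest))
		printf("lowest mask: 0x%llx\n", (unsigned long long)lowest);
	return 0;
}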
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7c248dc30f41..652e8bdef9aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -266,6 +266,12 @@ static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
266 return min_vruntime; 266 return min_vruntime;
267} 267}
268 268
269static inline int entity_before(struct sched_entity *a,
270 struct sched_entity *b)
271{
272 return (s64)(a->vruntime - b->vruntime) < 0;
273}
274
269static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se) 275static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
270{ 276{
271 return se->vruntime - cfs_rq->min_vruntime; 277 return se->vruntime - cfs_rq->min_vruntime;
@@ -605,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
605static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) 611static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
606{ 612{
607#ifdef CONFIG_SCHEDSTATS 613#ifdef CONFIG_SCHEDSTATS
614 struct task_struct *tsk = NULL;
615
616 if (entity_is_task(se))
617 tsk = task_of(se);
618
608 if (se->sleep_start) { 619 if (se->sleep_start) {
609 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start; 620 u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
610 struct task_struct *tsk = task_of(se);
611 621
612 if ((s64)delta < 0) 622 if ((s64)delta < 0)
613 delta = 0; 623 delta = 0;
@@ -618,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
618 se->sleep_start = 0; 628 se->sleep_start = 0;
619 se->sum_sleep_runtime += delta; 629 se->sum_sleep_runtime += delta;
620 630
621 account_scheduler_latency(tsk, delta >> 10, 1); 631 if (tsk)
632 account_scheduler_latency(tsk, delta >> 10, 1);
622 } 633 }
623 if (se->block_start) { 634 if (se->block_start) {
624 u64 delta = rq_of(cfs_rq)->clock - se->block_start; 635 u64 delta = rq_of(cfs_rq)->clock - se->block_start;
625 struct task_struct *tsk = task_of(se);
626 636
627 if ((s64)delta < 0) 637 if ((s64)delta < 0)
628 delta = 0; 638 delta = 0;
@@ -633,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
633 se->block_start = 0; 643 se->block_start = 0;
634 se->sum_sleep_runtime += delta; 644 se->sum_sleep_runtime += delta;
635 645
636 /* 646 if (tsk) {
637 * Blocking time is in units of nanosecs, so shift by 20 to 647 /*
638 * get a milliseconds-range estimation of the amount of 648 * Blocking time is in units of nanosecs, so shift by
639 * time that the task spent sleeping: 649 * 20 to get a milliseconds-range estimation of the
640 */ 650 * amount of time that the task spent sleeping:
641 if (unlikely(prof_on == SLEEP_PROFILING)) { 651 */
642 652 if (unlikely(prof_on == SLEEP_PROFILING)) {
643 profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk), 653 profile_hits(SLEEP_PROFILING,
644 delta >> 20); 654 (void *)get_wchan(tsk),
655 delta >> 20);
656 }
657 account_scheduler_latency(tsk, delta >> 10, 0);
645 } 658 }
646 account_scheduler_latency(tsk, delta >> 10, 0);
647 } 659 }
648#endif 660#endif
649} 661}
@@ -1017,7 +1029,7 @@ static void yield_task_fair(struct rq *rq)
1017 /* 1029 /*
1018 * Already in the rightmost position? 1030 * Already in the rightmost position?
1019 */ 1031 */
1020 if (unlikely(!rightmost || rightmost->vruntime < se->vruntime)) 1032 if (unlikely(!rightmost || entity_before(rightmost, se)))
1021 return; 1033 return;
1022 1034
1023 /* 1035 /*
@@ -1713,7 +1725,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
1713 1725
1714 /* 'curr' will be NULL if the child belongs to a different group */ 1726 /* 'curr' will be NULL if the child belongs to a different group */
1715 if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) && 1727 if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
1716 curr && curr->vruntime < se->vruntime) { 1728 curr && entity_before(curr, se)) {
1717 /* 1729 /*
1718 * Upon rescheduling, sched_class::put_prev_task() will place 1730 * Upon rescheduling, sched_class::put_prev_task() will place
1719 * 'current' within the tree based on its new key value. 1731 * 'current' within the tree based on its new key value.
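The new entity_before() helper above compares vruntimes through a signed difference so the ordering stays correct even after the unsigned vruntime counters wrap. A small stand-alone sketch of the same idiom (plain C, illustrative values only):

#include <stdio.h>
#include <stdint.h>

/* Wrap-safe "a is earlier than b" for monotonically increasing u64 keys:
 * the signed difference is negative whenever a trails b by less than 2^63,
 * even if one of the values has already wrapped past UINT64_MAX. */
static inline int before(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t near_wrap = UINT64_MAX - 5;    /* about to wrap */
	uint64_t wrapped   = 10;                /* already wrapped around */

	printf("%d\n", before(100, 200));               /* 1: plainly earlier */
	printf("%d\n", before(near_wrap, wrapped));     /* 1: still earlier despite wrap */
	printf("%d\n", before(wrapped, near_wrap));     /* 0 */
	return 0;
}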
diff --git a/kernel/signal.c b/kernel/signal.c
index ccf1ceedaebe..64c5deeaca5d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2454 stack_t oss; 2454 stack_t oss;
2455 int error; 2455 int error;
2456 2456
2457 if (uoss) { 2457 oss.ss_sp = (void __user *) current->sas_ss_sp;
2458 oss.ss_sp = (void __user *) current->sas_ss_sp; 2458 oss.ss_size = current->sas_ss_size;
2459 oss.ss_size = current->sas_ss_size; 2459 oss.ss_flags = sas_ss_flags(sp);
2460 oss.ss_flags = sas_ss_flags(sp);
2461 }
2462 2460
2463 if (uss) { 2461 if (uss) {
2464 void __user *ss_sp; 2462 void __user *ss_sp;
@@ -2466,10 +2464,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2466 int ss_flags; 2464 int ss_flags;
2467 2465
2468 error = -EFAULT; 2466 error = -EFAULT;
2469 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) 2467 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2470 || __get_user(ss_sp, &uss->ss_sp) 2468 goto out;
2471 || __get_user(ss_flags, &uss->ss_flags) 2469 error = __get_user(ss_sp, &uss->ss_sp) |
2472 || __get_user(ss_size, &uss->ss_size)) 2470 __get_user(ss_flags, &uss->ss_flags) |
2471 __get_user(ss_size, &uss->ss_size);
2472 if (error)
2473 goto out; 2473 goto out;
2474 2474
2475 error = -EPERM; 2475 error = -EPERM;
@@ -2501,13 +2501,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2501 current->sas_ss_size = ss_size; 2501 current->sas_ss_size = ss_size;
2502 } 2502 }
2503 2503
2504 error = 0;
2504 if (uoss) { 2505 if (uoss) {
2505 error = -EFAULT; 2506 error = -EFAULT;
2506 if (copy_to_user(uoss, &oss, sizeof(oss))) 2507 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2507 goto out; 2508 goto out;
2509 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2510 __put_user(oss.ss_size, &uoss->ss_size) |
2511 __put_user(oss.ss_flags, &uoss->ss_flags);
2508 } 2512 }
2509 2513
2510 error = 0;
2511out: 2514out:
2512 return error; 2515 return error;
2513} 2516}
diff --git a/kernel/smp.c b/kernel/smp.c
index ad63d8501207..94188b8ecc33 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -57,7 +57,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
57 return NOTIFY_BAD; 57 return NOTIFY_BAD;
58 break; 58 break;
59 59
60#ifdef CONFIG_CPU_HOTPLUG 60#ifdef CONFIG_HOTPLUG_CPU
61 case CPU_UP_CANCELED: 61 case CPU_UP_CANCELED:
62 case CPU_UP_CANCELED_FROZEN: 62 case CPU_UP_CANCELED_FROZEN:
63 63
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3a94905fa5d2..eb5e131a0485 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -345,7 +345,9 @@ void open_softirq(int nr, void (*action)(struct softirq_action *))
345 softirq_vec[nr].action = action; 345 softirq_vec[nr].action = action;
346} 346}
347 347
348/* Tasklets */ 348/*
349 * Tasklets
350 */
349struct tasklet_head 351struct tasklet_head
350{ 352{
351 struct tasklet_struct *head; 353 struct tasklet_struct *head;
@@ -493,6 +495,66 @@ void tasklet_kill(struct tasklet_struct *t)
493 495
494EXPORT_SYMBOL(tasklet_kill); 496EXPORT_SYMBOL(tasklet_kill);
495 497
498/*
499 * tasklet_hrtimer
500 */
501
502/*
503 * The trampoline is called when the hrtimer expires. If this is
504 * called from the hrtimer interrupt then we schedule the tasklet as
505 * the timer callback function expects to run in softirq context. If
506 * it's called in softirq context anyway (i.e. high resolution timers
507 * disabled) then the hrtimer callback is called right away.
508 */
509static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
510{
511 struct tasklet_hrtimer *ttimer =
512 container_of(timer, struct tasklet_hrtimer, timer);
513
514 if (hrtimer_is_hres_active(timer)) {
515 tasklet_hi_schedule(&ttimer->tasklet);
516 return HRTIMER_NORESTART;
517 }
518 return ttimer->function(timer);
519}
520
521/*
522 * Helper function which calls the hrtimer callback from
523 * tasklet/softirq context
524 */
525static void __tasklet_hrtimer_trampoline(unsigned long data)
526{
527 struct tasklet_hrtimer *ttimer = (void *)data;
528 enum hrtimer_restart restart;
529
530 restart = ttimer->function(&ttimer->timer);
531 if (restart != HRTIMER_NORESTART)
532 hrtimer_restart(&ttimer->timer);
533}
534
535/**
536 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
537 * @ttimer: tasklet_hrtimer which is initialized
538 * @function: hrtimer callback funtion which gets called from softirq context
538 * @function: hrtimer callback function which gets called from softirq context
539 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
540 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
541 */
542void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
543 enum hrtimer_restart (*function)(struct hrtimer *),
544 clockid_t which_clock, enum hrtimer_mode mode)
545{
546 hrtimer_init(&ttimer->timer, which_clock, mode);
547 ttimer->timer.function = __hrtimer_tasklet_trampoline;
548 tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
549 (unsigned long)ttimer);
550 ttimer->function = function;
551}
552EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
553
554/*
555 * Remote softirq bits
556 */
557
496DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list); 558DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
497EXPORT_PER_CPU_SYMBOL(softirq_work_list); 559EXPORT_PER_CPU_SYMBOL(softirq_work_list);
498 560
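To show how the tasklet_hrtimer infrastructure added above is meant to be consumed, here is a brief kernel-style usage sketch. It is illustrative only: tasklet_hrtimer_init() and the struct members come from this patch series, while my_timer, my_softirq_cb, my_setup and the 10 ms period are made-up example names.

#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_timer;         /* illustrative name */

/* Called via the trampolines shown above, so the body runs in (hi-)tasklet
 * softirq context rather than hard interrupt context; it re-arms itself. */
static enum hrtimer_restart my_softirq_cb(struct hrtimer *t)
{
	/* ... periodic softirq-context work ... */
	hrtimer_forward_now(t, ns_to_ktime(10 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}

static void my_setup(void)
{
	tasklet_hrtimer_init(&my_timer, my_softirq_cb,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer_start(&my_timer.timer, ns_to_ktime(10 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}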
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 592bf584d1d2..7466cb811251 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -513,7 +513,7 @@ static ssize_t sysfs_override_clocksource(struct sys_device *dev,
513 * Check to make sure we don't switch to a non-highres capable 513 * Check to make sure we don't switch to a non-highres capable
514 * clocksource if the tick code is in oneshot mode (highres or nohz) 514 * clocksource if the tick code is in oneshot mode (highres or nohz)
515 */ 515 */
516 if (tick_oneshot_mode_active() && 516 if (tick_oneshot_mode_active() && ovr &&
517 !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) { 517 !(ovr->flags & CLOCK_SOURCE_VALID_FOR_HRES)) {
518 printk(KERN_WARNING "%s clocksource is not HRT compatible. " 518 printk(KERN_WARNING "%s clocksource is not HRT compatible. "
519 "Cannot switch while in HRT/NOHZ mode\n", ovr->name); 519 "Cannot switch while in HRT/NOHZ mode\n", ovr->name);
diff --git a/kernel/timer.c b/kernel/timer.c
index 0b36b9e5cc8b..a7f07d5a6241 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -714,7 +714,7 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
714 * networking code - if the timer is re-modified 714 * networking code - if the timer is re-modified
715 * to be the same thing then just return: 715 * to be the same thing then just return:
716 */ 716 */
717 if (timer->expires == expires && timer_pending(timer)) 717 if (timer_pending(timer) && timer->expires == expires)
718 return 1; 718 return 1;
719 719
720 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED); 720 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 24e3ff53b24b..094863416b2e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1617,7 +1617,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1617 1617
1618 mutex_lock(&ftrace_regex_lock); 1618 mutex_lock(&ftrace_regex_lock);
1619 if ((file->f_mode & FMODE_WRITE) && 1619 if ((file->f_mode & FMODE_WRITE) &&
1620 !(file->f_flags & O_APPEND)) 1620 (file->f_flags & O_TRUNC))
1621 ftrace_filter_reset(enable); 1621 ftrace_filter_reset(enable);
1622 1622
1623 if (file->f_mode & FMODE_READ) { 1623 if (file->f_mode & FMODE_READ) {
@@ -2527,7 +2527,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
2527 2527
2528 mutex_lock(&graph_lock); 2528 mutex_lock(&graph_lock);
2529 if ((file->f_mode & FMODE_WRITE) && 2529 if ((file->f_mode & FMODE_WRITE) &&
2530 !(file->f_flags & O_APPEND)) { 2530 (file->f_flags & O_TRUNC)) {
2531 ftrace_graph_count = 0; 2531 ftrace_graph_count = 0;
2532 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); 2532 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2533 } 2533 }
@@ -2546,6 +2546,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
2546} 2546}
2547 2547
2548static int 2548static int
2549ftrace_graph_release(struct inode *inode, struct file *file)
2550{
2551 if (file->f_mode & FMODE_READ)
2552 seq_release(inode, file);
2553 return 0;
2554}
2555
2556static int
2549ftrace_set_func(unsigned long *array, int *idx, char *buffer) 2557ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2550{ 2558{
2551 struct dyn_ftrace *rec; 2559 struct dyn_ftrace *rec;
@@ -2674,9 +2682,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2674} 2682}
2675 2683
2676static const struct file_operations ftrace_graph_fops = { 2684static const struct file_operations ftrace_graph_fops = {
2677 .open = ftrace_graph_open, 2685 .open = ftrace_graph_open,
2678 .read = seq_read, 2686 .read = seq_read,
2679 .write = ftrace_graph_write, 2687 .write = ftrace_graph_write,
2688 .release = ftrace_graph_release,
2680}; 2689};
2681#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 2690#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2682 2691
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 51633d74a21e..da2c59d8f486 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1170,6 +1170,7 @@ ring_buffer_free(struct ring_buffer *buffer)
1170 1170
1171 put_online_cpus(); 1171 put_online_cpus();
1172 1172
1173 kfree(buffer->buffers);
1173 free_cpumask_var(buffer->cpumask); 1174 free_cpumask_var(buffer->cpumask);
1174 1175
1175 kfree(buffer); 1176 kfree(buffer);
@@ -2379,7 +2380,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
2379 */ 2380 */
2380 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 2381 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2381 2382
2382 if (!rb_try_to_discard(cpu_buffer, event)) 2383 if (rb_try_to_discard(cpu_buffer, event))
2383 goto out; 2384 goto out;
2384 2385
2385 /* 2386 /*
@@ -2990,7 +2991,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2990 * the box. Return the padding, and we will release 2991 * the box. Return the padding, and we will release
2991 * the current locks, and try again. 2992 * the current locks, and try again.
2992 */ 2993 */
2993 rb_advance_reader(cpu_buffer);
2994 return event; 2994 return event;
2995 2995
2996 case RINGBUF_TYPE_TIME_EXTEND: 2996 case RINGBUF_TYPE_TIME_EXTEND:
@@ -3093,7 +3093,7 @@ static inline int rb_ok_to_lock(void)
3093 * buffer too. A one time deal is all you get from reading 3093 * buffer too. A one time deal is all you get from reading
3094 * the ring buffer from an NMI. 3094 * the ring buffer from an NMI.
3095 */ 3095 */
3096 if (likely(!in_nmi() && !oops_in_progress)) 3096 if (likely(!in_nmi()))
3097 return 1; 3097 return 1;
3098 3098
3099 tracing_off_permanent(); 3099 tracing_off_permanent();
@@ -3126,6 +3126,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
3126 if (dolock) 3126 if (dolock)
3127 spin_lock(&cpu_buffer->reader_lock); 3127 spin_lock(&cpu_buffer->reader_lock);
3128 event = rb_buffer_peek(buffer, cpu, ts); 3128 event = rb_buffer_peek(buffer, cpu, ts);
3129 if (event && event->type_len == RINGBUF_TYPE_PADDING)
3130 rb_advance_reader(cpu_buffer);
3129 if (dolock) 3131 if (dolock)
3130 spin_unlock(&cpu_buffer->reader_lock); 3132 spin_unlock(&cpu_buffer->reader_lock);
3131 local_irq_restore(flags); 3133 local_irq_restore(flags);
@@ -3197,12 +3199,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
3197 spin_lock(&cpu_buffer->reader_lock); 3199 spin_lock(&cpu_buffer->reader_lock);
3198 3200
3199 event = rb_buffer_peek(buffer, cpu, ts); 3201 event = rb_buffer_peek(buffer, cpu, ts);
3200 if (!event) 3202 if (event)
3201 goto out_unlock; 3203 rb_advance_reader(cpu_buffer);
3202
3203 rb_advance_reader(cpu_buffer);
3204 3204
3205 out_unlock:
3206 if (dolock) 3205 if (dolock)
3207 spin_unlock(&cpu_buffer->reader_lock); 3206 spin_unlock(&cpu_buffer->reader_lock);
3208 local_irq_restore(flags); 3207 local_irq_restore(flags);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 0cfd1a62def1..e793cda91dd3 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
848 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | 848 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
849 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); 849 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
850} 850}
851EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
851 852
852struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, 853struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
853 int type, 854 int type,
@@ -1857,7 +1858,7 @@ static int tracing_open(struct inode *inode, struct file *file)
1857 1858
1858 /* If this file was open for write, then erase contents */ 1859 /* If this file was open for write, then erase contents */
1859 if ((file->f_mode & FMODE_WRITE) && 1860 if ((file->f_mode & FMODE_WRITE) &&
1860 !(file->f_flags & O_APPEND)) { 1861 (file->f_flags & O_TRUNC)) {
1861 long cpu = (long) inode->i_private; 1862 long cpu = (long) inode->i_private;
1862 1863
1863 if (cpu == TRACE_PIPE_ALL_CPU) 1864 if (cpu == TRACE_PIPE_ALL_CPU)
@@ -2911,7 +2912,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
2911 break; 2912 break;
2912 } 2913 }
2913 2914
2914 trace_consume(iter); 2915 if (ret != TRACE_TYPE_NO_CONSUME)
2916 trace_consume(iter);
2915 rem -= count; 2917 rem -= count;
2916 if (!find_next_entry_inc(iter)) { 2918 if (!find_next_entry_inc(iter)) {
2917 rem = 0; 2919 rem = 0;
@@ -4056,8 +4058,11 @@ static void __ftrace_dump(bool disable_tracing)
4056 iter.pos = -1; 4058 iter.pos = -1;
4057 4059
4058 if (find_next_entry_inc(&iter) != NULL) { 4060 if (find_next_entry_inc(&iter) != NULL) {
4059 print_trace_line(&iter); 4061 int ret;
4060 trace_consume(&iter); 4062
4063 ret = print_trace_line(&iter);
4064 if (ret != TRACE_TYPE_NO_CONSUME)
4065 trace_consume(&iter);
4061 } 4066 }
4062 4067
4063 trace_printk_seq(&iter.seq); 4068 trace_printk_seq(&iter.seq);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9301f1263c5c..d682357e4b1f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
438struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 438struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
439 int *ent_cpu, u64 *ent_ts); 439 int *ent_cpu, u64 *ent_ts);
440 440
441void tracing_generic_entry_update(struct trace_entry *entry,
442 unsigned long flags,
443 int pc);
444
445void default_wait_pipe(struct trace_iterator *iter); 441void default_wait_pipe(struct trace_iterator *iter);
446void poll_wait_pipe(struct trace_iterator *iter); 442void poll_wait_pipe(struct trace_iterator *iter);
447 443
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 5b5895afecfe..11ba5bb4ed0a 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id)
14 14
15 mutex_lock(&event_mutex); 15 mutex_lock(&event_mutex);
16 list_for_each_entry(event, &ftrace_events, list) { 16 list_for_each_entry(event, &ftrace_events, list) {
17 if (event->id == event_id) { 17 if (event->id == event_id && event->profile_enable) {
18 ret = event->profile_enable(event); 18 ret = event->profile_enable(event);
19 break; 19 break;
20 } 20 }
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 70ecb7653b46..e0cbede96783 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -378,7 +378,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
378 const struct seq_operations *seq_ops; 378 const struct seq_operations *seq_ops;
379 379
380 if ((file->f_mode & FMODE_WRITE) && 380 if ((file->f_mode & FMODE_WRITE) &&
381 !(file->f_flags & O_APPEND)) 381 (file->f_flags & O_TRUNC))
382 ftrace_clear_events(); 382 ftrace_clear_events();
383 383
384 seq_ops = inode->i_private; 384 seq_ops = inode->i_private;
@@ -945,7 +945,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
945 entry = trace_create_file("enable", 0644, call->dir, call, 945 entry = trace_create_file("enable", 0644, call->dir, call,
946 enable); 946 enable);
947 947
948 if (call->id) 948 if (call->id && call->profile_enable)
949 entry = trace_create_file("id", 0444, call->dir, call, 949 entry = trace_create_file("id", 0444, call->dir, call,
950 id); 950 id);
951 951
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index f97244a41a4f..3f4a251b7d16 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -927,9 +927,16 @@ print_graph_function(struct trace_iterator *iter)
927 927
928 switch (entry->type) { 928 switch (entry->type) {
929 case TRACE_GRAPH_ENT: { 929 case TRACE_GRAPH_ENT: {
930 struct ftrace_graph_ent_entry *field; 930 /*
931 * print_graph_entry() may consume the current event,
932 * thus @field may become invalid, so we need to save it.
933 * sizeof(struct ftrace_graph_ent_entry) is very small,
934 * it can be safely saved at the stack.
935 */
936 struct ftrace_graph_ent_entry *field, saved;
931 trace_assign_type(field, entry); 937 trace_assign_type(field, entry);
932 return print_graph_entry(field, s, iter); 938 saved = *field;
939 return print_graph_entry(&saved, s, iter);
933 } 940 }
934 case TRACE_GRAPH_RET: { 941 case TRACE_GRAPH_RET: {
935 struct ftrace_graph_ret_entry *field; 942 struct ftrace_graph_ret_entry *field;
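The comment in the hunk above explains why @field is copied to the stack: print_graph_entry() may consume the current ring-buffer event, invalidating the pointer. A tiny stand-alone sketch of that copy-before-consume idea (plain C; the record layout and names are illustrative):

#include <stdio.h>
#include <string.h>

struct ent { long func; int depth; };

static struct ent ring_slot;            /* stands in for the ring-buffer entry */

static void consume_and_reuse(void)
{
	memset(&ring_slot, 0, sizeof(ring_slot));       /* the entry is now gone */
}

static void print_entry(const struct ent *e)
{
	consume_and_reuse();            /* may invalidate ring_slot */
	printf("func=%ld depth=%d\n", e->func, e->depth);
}

int main(void)
{
	ring_slot = (struct ent){ .func = 0x1234, .depth = 2 };

	struct ent saved = ring_slot;   /* small, so safe to snapshot on the stack */
	print_entry(&saved);            /* prints the snapshot, not zeroes */
	return 0;
}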
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 7b6278110827..687699d365ae 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -176,7 +176,7 @@ static int t_show(struct seq_file *m, void *v)
176 const char *str = *fmt; 176 const char *str = *fmt;
177 int i; 177 int i;
178 178
179 seq_printf(m, "0x%lx : \"", (unsigned long)fmt); 179 seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
180 180
181 /* 181 /*
182 * Tabs and new lines need to be converted. 182 * Tabs and new lines need to be converted.
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index a4dc8d9ad1b1..0da1cff08d67 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -294,17 +294,14 @@ static const struct seq_operations stack_trace_seq_ops = {
294 294
295static int stack_trace_open(struct inode *inode, struct file *file) 295static int stack_trace_open(struct inode *inode, struct file *file)
296{ 296{
297 int ret; 297 return seq_open(file, &stack_trace_seq_ops);
298
299 ret = seq_open(file, &stack_trace_seq_ops);
300
301 return ret;
302} 298}
303 299
304static const struct file_operations stack_trace_fops = { 300static const struct file_operations stack_trace_fops = {
305 .open = stack_trace_open, 301 .open = stack_trace_open,
306 .read = seq_read, 302 .read = seq_read,
307 .llseek = seq_lseek, 303 .llseek = seq_lseek,
304 .release = seq_release,
308}; 305};
309 306
310int 307int
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index f069461f10bd..07c60b09258f 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -76,7 +76,7 @@ static struct rb_node *release_next(struct tracer_stat *ts,
76 } 76 }
77} 77}
78 78
79static void reset_stat_session(struct stat_session *session) 79static void __reset_stat_session(struct stat_session *session)
80{ 80{
81 struct rb_node *node = session->stat_root.rb_node; 81 struct rb_node *node = session->stat_root.rb_node;
82 82
@@ -86,10 +86,17 @@ static void reset_stat_session(struct stat_session *session)
86 session->stat_root = RB_ROOT; 86 session->stat_root = RB_ROOT;
87} 87}
88 88
89static void reset_stat_session(struct stat_session *session)
90{
91 mutex_lock(&session->stat_mutex);
92 __reset_stat_session(session);
93 mutex_unlock(&session->stat_mutex);
94}
95
89static void destroy_session(struct stat_session *session) 96static void destroy_session(struct stat_session *session)
90{ 97{
91 debugfs_remove(session->file); 98 debugfs_remove(session->file);
92 reset_stat_session(session); 99 __reset_stat_session(session);
93 mutex_destroy(&session->stat_mutex); 100 mutex_destroy(&session->stat_mutex);
94 kfree(session); 101 kfree(session);
95} 102}
@@ -153,7 +160,7 @@ static int stat_seq_init(struct stat_session *session)
153 int i; 160 int i;
154 161
155 mutex_lock(&session->stat_mutex); 162 mutex_lock(&session->stat_mutex);
156 reset_stat_session(session); 163 __reset_stat_session(session);
157 164
158 if (!ts->stat_cmp) 165 if (!ts->stat_cmp)
159 ts->stat_cmp = dummy_cmp; 166 ts->stat_cmp = dummy_cmp;
@@ -186,7 +193,7 @@ exit:
186 return ret; 193 return ret;
187 194
188exit_free_rbtree: 195exit_free_rbtree:
189 reset_stat_session(session); 196 __reset_stat_session(session);
190 mutex_unlock(&session->stat_mutex); 197 mutex_unlock(&session->stat_mutex);
191 return ret; 198 return ret;
192} 199}
@@ -253,16 +260,21 @@ static const struct seq_operations trace_stat_seq_ops = {
253static int tracing_stat_open(struct inode *inode, struct file *file) 260static int tracing_stat_open(struct inode *inode, struct file *file)
254{ 261{
255 int ret; 262 int ret;
256 263 struct seq_file *m;
257 struct stat_session *session = inode->i_private; 264 struct stat_session *session = inode->i_private;
258 265
266 ret = stat_seq_init(session);
267 if (ret)
268 return ret;
269
259 ret = seq_open(file, &trace_stat_seq_ops); 270 ret = seq_open(file, &trace_stat_seq_ops);
260 if (!ret) { 271 if (ret) {
261 struct seq_file *m = file->private_data; 272 reset_stat_session(session);
262 m->private = session; 273 return ret;
263 ret = stat_seq_init(session);
264 } 274 }
265 275
276 m = file->private_data;
277 m->private = session;
266 return ret; 278 return ret;
267} 279}
268 280
@@ -273,11 +285,9 @@ static int tracing_stat_release(struct inode *i, struct file *f)
273{ 285{
274 struct stat_session *session = i->i_private; 286 struct stat_session *session = i->i_private;
275 287
276 mutex_lock(&session->stat_mutex);
277 reset_stat_session(session); 288 reset_stat_session(session);
278 mutex_unlock(&session->stat_mutex);
279 289
280 return 0; 290 return seq_release(i, f);
281} 291}
282 292
283static const struct file_operations tracing_stat_fops = { 293static const struct file_operations tracing_stat_fops = {