author    Thomas Gleixner <tglx@linutronix.de>  2010-05-10 05:59:37 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2010-05-10 08:20:42 -0400
commit    dbb6be6d5e974c42bbecd183effaa0df69e1dd8b (patch)
tree      5735cb47e70853d057a9881dd0ce44b83e88fa63 /kernel
parent    6a867a395558a7f882d041783e4cdea6744ca2bf (diff)
parent    b57f95a38233a2e73b679bea4a5453a1cc2a1cc9 (diff)
Merge branch 'linus' into timers/core
Reason: Further posix_cpu_timer patches depend on mainline changes

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/async.c  1
-rw-r--r--  kernel/audit.c  1
-rw-r--r--  kernel/audit_tree.c  1
-rw-r--r--  kernel/audit_watch.c  1
-rw-r--r--  kernel/auditfilter.c  1
-rw-r--r--  kernel/auditsc.c  3
-rw-r--r--  kernel/cgroup.c  17
-rw-r--r--  kernel/cgroup_freezer.c  15
-rw-r--r--  kernel/compat.c  1
-rw-r--r--  kernel/cpu.c  1
-rw-r--r--  kernel/cpuset.c  106
-rw-r--r--  kernel/cred.c  11
-rw-r--r--  kernel/early_res.c  6
-rw-r--r--  kernel/exit.c  3
-rw-r--r--  kernel/fork.c  3
-rw-r--r--  kernel/irq/chip.c  35
-rw-r--r--  kernel/irq/manage.c  32
-rw-r--r--  kernel/irq/numa_migrate.c  1
-rw-r--r--  kernel/irq/proc.c  1
-rw-r--r--  kernel/kallsyms.c  1
-rw-r--r--  kernel/kgdb.c  205
-rw-r--r--  kernel/kprobes.c  3
-rw-r--r--  kernel/kthread.c  2
-rw-r--r--  kernel/latencytop.c  1
-rw-r--r--  kernel/lockdep.c  31
-rw-r--r--  kernel/module.c  139
-rw-r--r--  kernel/nsproxy.c  1
-rw-r--r--  kernel/padata.c  1
-rw-r--r--  kernel/perf_event.c  105
-rw-r--r--  kernel/pid_namespace.c  1
-rw-r--r--  kernel/posix-cpu-timers.c  10
-rw-r--r--  kernel/power/hibernate.c  1
-rw-r--r--  kernel/power/hibernate_nvs.c  1
-rw-r--r--  kernel/power/process.c  5
-rw-r--r--  kernel/power/snapshot.c  1
-rw-r--r--  kernel/power/suspend.c  1
-rw-r--r--  kernel/power/swap.c  1
-rw-r--r--  kernel/power/user.c  2
-rw-r--r--  kernel/rcupdate.c  41
-rw-r--r--  kernel/res_counter.c  1
-rw-r--r--  kernel/resource.c  44
-rw-r--r--  kernel/sched.c  33
-rw-r--r--  kernel/sched_cpupri.c  1
-rw-r--r--  kernel/sched_debug.c  6
-rw-r--r--  kernel/slow-work.c  2
-rw-r--r--  kernel/slow-work.h  8
-rw-r--r--  kernel/smp.c  1
-rw-r--r--  kernel/softlockup.c  4
-rw-r--r--  kernel/srcu.c  1
-rw-r--r--  kernel/sys.c  3
-rw-r--r--  kernel/sysctl_binary.c  1
-rw-r--r--  kernel/taskstats.c  1
-rw-r--r--  kernel/time.c  1
-rw-r--r--  kernel/time/tick-oneshot.c  52
-rw-r--r--  kernel/time/timecompare.c  1
-rw-r--r--  kernel/time/timekeeping.c  3
-rw-r--r--  kernel/time/timer_list.c  3
-rw-r--r--  kernel/timer.c  2
-rw-r--r--  kernel/trace/Makefile  2
-rw-r--r--  kernel/trace/blktrace.c  1
-rw-r--r--  kernel/trace/ftrace.c  1
-rw-r--r--  kernel/trace/power-traces.c  1
-rw-r--r--  kernel/trace/ring_buffer.c  23
-rw-r--r--  kernel/trace/trace.c  2
-rw-r--r--  kernel/trace/trace_clock.c  4
-rw-r--r--  kernel/trace/trace_event_perf.c (renamed from kernel/trace/trace_event_profile.c)  59
-rw-r--r--  kernel/trace/trace_events.c  3
-rw-r--r--  kernel/trace/trace_events_filter.c  1
-rw-r--r--  kernel/trace/trace_functions_graph.c  1
-rw-r--r--  kernel/trace/trace_kprobe.c  29
-rw-r--r--  kernel/trace/trace_ksym.c  1
-rw-r--r--  kernel/trace/trace_mmiotrace.c  1
-rw-r--r--  kernel/trace/trace_selftest.c  1
-rw-r--r--  kernel/trace/trace_stat.c  1
-rw-r--r--  kernel/trace/trace_syscalls.c  73
-rw-r--r--  kernel/trace/trace_workqueue.c  1
-rw-r--r--  kernel/workqueue.c  2
77 files changed, 755 insertions, 413 deletions
diff --git a/kernel/async.c b/kernel/async.c
index 27235f5de198..15319d6c18fe 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -56,6 +56,7 @@ asynchronous and synchronous parts of the kernel.
 #include <linux/init.h>
 #include <linux/kthread.h>
 #include <linux/delay.h>
+#include <linux/slab.h>
 #include <asm/atomic.h>
 
 static async_cookie_t next_cookie = 1;
diff --git a/kernel/audit.c b/kernel/audit.c
index 78f7f86aa238..c71bd26631a2 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -46,6 +46,7 @@
 #include <asm/atomic.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/kthread.h>
 
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 028e85663f27..46a57b57a335 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -3,6 +3,7 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/kthread.h>
+#include <linux/slab.h>
 
 struct audit_tree;
 struct audit_chunk;
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index cc7e87936cbc..8df43696f4ba 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -27,6 +27,7 @@
 #include <linux/namei.h>
 #include <linux/netlink.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/inotify.h>
 #include <linux/security.h>
 #include "audit.h"
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a70604047f3c..ce08041f578d 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -27,6 +27,7 @@
 #include <linux/namei.h>
 #include <linux/netlink.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <linux/security.h>
 #include "audit.h"
 
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index f3a461c0970a..3828ad5fb8f1 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -49,6 +49,7 @@
 #include <linux/namei.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/socket.h>
 #include <linux/mqueue.h>
@@ -1893,7 +1894,7 @@ static int audit_inc_name_count(struct audit_context *context,
 {
	if (context->name_count >= AUDIT_NAMES) {
		if (inode)
-			printk(KERN_DEBUG "name_count maxed, losing inode data: "
+			printk(KERN_DEBUG "audit: name_count maxed, losing inode data: "
			       "dev=%02x:%02x, inode=%lu\n",
			       MAJOR(inode->i_sb->s_dev),
			       MINOR(inode->i_sb->s_dev),
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index ef909a329750..3a53c771e503 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -27,7 +27,6 @@
  */
 
 #include <linux/cgroup.h>
-#include <linux/module.h>
 #include <linux/ctype.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -1647,7 +1646,9 @@ static inline struct cftype *__d_cft(struct dentry *dentry)
 int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
 {
	char *start;
-	struct dentry *dentry = rcu_dereference(cgrp->dentry);
+	struct dentry *dentry = rcu_dereference_check(cgrp->dentry,
+						      rcu_read_lock_held() ||
+						      cgroup_lock_is_held());
 
	if (!dentry || cgrp == dummytop) {
		/*
@@ -1663,13 +1664,17 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
	*--start = '\0';
	for (;;) {
		int len = dentry->d_name.len;
+
		if ((start -= len) < buf)
			return -ENAMETOOLONG;
-		memcpy(start, cgrp->dentry->d_name.name, len);
+		memcpy(start, dentry->d_name.name, len);
		cgrp = cgrp->parent;
		if (!cgrp)
			break;
-		dentry = rcu_dereference(cgrp->dentry);
+
+		dentry = rcu_dereference_check(cgrp->dentry,
+					       rcu_read_lock_held() ||
+					       cgroup_lock_is_held());
		if (!cgrp->parent)
			continue;
		if (--start < buf)
@@ -4556,13 +4561,13 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
 {
	int subsys_id, i, depth = 0;
	struct cgroup_subsys_state *parent_css, *child_css;
-	struct css_id *child_id, *parent_id = NULL;
+	struct css_id *child_id, *parent_id;
 
	subsys_id = ss->subsys_id;
	parent_css = parent->subsys[subsys_id];
	child_css = child->subsys[subsys_id];
-	depth = css_depth(parent_css) + 1;
	parent_id = parent_css->id;
+	depth = parent_id->depth;
 
	child_id = get_new_cssid(ss, depth);
	if (IS_ERR(child_id))
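[Side note on the cgroup_path() hunks above: they swap plain rcu_dereference() for rcu_dereference_check(), whose second argument is a lockdep condition stating every context that legitimizes the read — here either rcu_read_lock() or holding cgroup_mutex (cgroup_lock_is_held()). A minimal sketch of the call shape, not taken from the patch; my_lock_is_held() is a hypothetical stand-in for a real predicate such as cgroup_lock_is_held():]

	/* RCU lockdep warns only when none of the listed conditions holds */
	p = rcu_dereference_check(gp->ptr,
				  rcu_read_lock_held() ||	/* reader path */
				  my_lock_is_held());		/* writer path */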
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 59e9ef6aab40..e5c0244962b0 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -15,6 +15,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/cgroup.h>
 #include <linux/fs.h>
 #include <linux/uaccess.h>
@@ -47,17 +48,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
			    struct freezer, css);
 }
 
-int cgroup_frozen(struct task_struct *task)
+int cgroup_freezing_or_frozen(struct task_struct *task)
 {
	struct freezer *freezer;
	enum freezer_state state;
 
	task_lock(task);
	freezer = task_freezer(task);
-	state = freezer->state;
+	if (!freezer->css.cgroup->parent)
+		state = CGROUP_THAWED; /* root cgroup can't be frozen */
+	else
+		state = freezer->state;
	task_unlock(task);
 
-	return state == CGROUP_FROZEN;
+	return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
 }
 
 /*
@@ -201,9 +205,12 @@ static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
	 * No lock is needed, since the task isn't on tasklist yet,
	 * so it can't be moved to another cgroup, which means the
	 * freezer won't be removed and will be valid during this
-	 * function call.
+	 * function call. Nevertheless, apply RCU read-side critical
+	 * section to suppress RCU lockdep false positives.
	 */
+	rcu_read_lock();
	freezer = task_freezer(task);
+	rcu_read_unlock();
 
	/*
	 * The root cgroup is non-freezable, so we can skip the
diff --git a/kernel/compat.c b/kernel/compat.c
index f6c204f07ea6..7f40e9275fd9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -25,6 +25,7 @@
 #include <linux/posix-timers.h>
 #include <linux/times.h>
 #include <linux/ptrace.h>
+#include <linux/gfp.h>
 
 #include <asm/uaccess.h>
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f8cced2692b3..25bba73b1be3 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -14,6 +14,7 @@
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
+#include <linux/gfp.h>
 
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index ba401fab459f..d10946748ec2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -920,9 +920,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
	 * call to guarantee_online_mems(), as we know no one is changing
	 * our task's cpuset.
	 *
-	 * Hold callback_mutex around the two modifications of our tasks
-	 * mems_allowed to synchronize with cpuset_mems_allowed().
-	 *
	 * While the mm_struct we are migrating is typically from some
	 * other task, the task_struct mems_allowed that we are hacking
	 * is for our current task, which must allocate new pages for that
@@ -973,15 +970,20 @@ static void cpuset_change_nodemask(struct task_struct *p,
	struct cpuset *cs;
	int migrate;
	const nodemask_t *oldmem = scan->data;
-	nodemask_t newmems;
+	NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL);
+
+	if (!newmems)
+		return;
 
	cs = cgroup_cs(scan->cg);
-	guarantee_online_mems(cs, &newmems);
+	guarantee_online_mems(cs, newmems);
 
	task_lock(p);
-	cpuset_change_task_nodemask(p, &newmems);
+	cpuset_change_task_nodemask(p, newmems);
	task_unlock(p);
 
+	NODEMASK_FREE(newmems);
+
	mm = get_task_mm(p);
	if (!mm)
		return;
@@ -1051,16 +1053,21 @@ static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
 static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
 {
-	nodemask_t oldmem;
+	NODEMASK_ALLOC(nodemask_t, oldmem, GFP_KERNEL);
	int retval;
	struct ptr_heap heap;
 
+	if (!oldmem)
+		return -ENOMEM;
+
	/*
	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
	 * it's read-only
	 */
-	if (cs == &top_cpuset)
-		return -EACCES;
+	if (cs == &top_cpuset) {
+		retval = -EACCES;
+		goto done;
+	}
 
@@ -1076,11 +1083,13 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			goto done;
 
		if (!nodes_subset(trialcs->mems_allowed,
-				node_states[N_HIGH_MEMORY]))
-			return -EINVAL;
+				node_states[N_HIGH_MEMORY])) {
+			retval = -EINVAL;
+			goto done;
+		}
	}
-	oldmem = cs->mems_allowed;
-	if (nodes_equal(oldmem, trialcs->mems_allowed)) {
+	*oldmem = cs->mems_allowed;
+	if (nodes_equal(*oldmem, trialcs->mems_allowed)) {
		retval = 0;	/* Too easy - nothing to do */
		goto done;
	}
@@ -1096,10 +1105,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
	cs->mems_allowed = trialcs->mems_allowed;
	mutex_unlock(&callback_mutex);
 
-	update_tasks_nodemask(cs, &oldmem, &heap);
+	update_tasks_nodemask(cs, oldmem, &heap);
 
	heap_free(&heap);
 done:
+	NODEMASK_FREE(oldmem);
	return retval;
 }
 
@@ -1384,40 +1394,47 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
			  struct cgroup *oldcont, struct task_struct *tsk,
			  bool threadgroup)
 {
-	nodemask_t from, to;
	struct mm_struct *mm;
	struct cpuset *cs = cgroup_cs(cont);
	struct cpuset *oldcs = cgroup_cs(oldcont);
+	NODEMASK_ALLOC(nodemask_t, from, GFP_KERNEL);
+	NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL);
+
+	if (from == NULL || to == NULL)
+		goto alloc_fail;
 
	if (cs == &top_cpuset) {
		cpumask_copy(cpus_attach, cpu_possible_mask);
-		to = node_possible_map;
	} else {
		guarantee_online_cpus(cs, cpus_attach);
-		guarantee_online_mems(cs, &to);
	}
+	guarantee_online_mems(cs, to);
 
	/* do per-task migration stuff possibly for each in the threadgroup */
-	cpuset_attach_task(tsk, &to, cs);
+	cpuset_attach_task(tsk, to, cs);
	if (threadgroup) {
		struct task_struct *c;
		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			cpuset_attach_task(c, &to, cs);
+			cpuset_attach_task(c, to, cs);
		}
		rcu_read_unlock();
	}
 
	/* change mm; only needs to be done once even if threadgroup */
-	from = oldcs->mems_allowed;
-	to = cs->mems_allowed;
+	*from = oldcs->mems_allowed;
+	*to = cs->mems_allowed;
	mm = get_task_mm(tsk);
	if (mm) {
-		mpol_rebind_mm(mm, &to);
+		mpol_rebind_mm(mm, to);
		if (is_memory_migrate(cs))
-			cpuset_migrate_mm(mm, &from, &to);
+			cpuset_migrate_mm(mm, from, to);
		mmput(mm);
	}
+
+alloc_fail:
+	NODEMASK_FREE(from);
+	NODEMASK_FREE(to);
 }
 
 /* The various types of files and directories in a cpuset file system */
@@ -1562,13 +1579,21 @@ static int cpuset_sprintf_cpulist(char *page, struct cpuset *cs)
 
 static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 {
-	nodemask_t mask;
+	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);
+	int retval;
+
+	if (mask == NULL)
+		return -ENOMEM;
 
	mutex_lock(&callback_mutex);
-	mask = cs->mems_allowed;
+	*mask = cs->mems_allowed;
	mutex_unlock(&callback_mutex);
 
-	return nodelist_scnprintf(page, PAGE_SIZE, mask);
+	retval = nodelist_scnprintf(page, PAGE_SIZE, *mask);
+
+	NODEMASK_FREE(mask);
+
+	return retval;
 }
 
 static ssize_t cpuset_common_file_read(struct cgroup *cont,
@@ -1997,7 +2022,10 @@ static void scan_for_empty_cpusets(struct cpuset *root)
	struct cpuset *cp;	/* scans cpusets being updated */
	struct cpuset *child;	/* scans child cpusets of cp */
	struct cgroup *cont;
-	nodemask_t oldmems;
+	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
+
+	if (oldmems == NULL)
+		return;
 
	list_add_tail((struct list_head *)&root->stack_list, &queue);
 
@@ -2014,7 +2042,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
			continue;
 
-		oldmems = cp->mems_allowed;
+		*oldmems = cp->mems_allowed;
 
		/* Remove offline cpus and mems from this cpuset. */
		mutex_lock(&callback_mutex);
@@ -2030,9 +2058,10 @@ static void scan_for_empty_cpusets(struct cpuset *root)
			remove_tasks_in_empty_cpuset(cp);
		else {
			update_tasks_cpumask(cp, NULL);
-			update_tasks_nodemask(cp, &oldmems, NULL);
+			update_tasks_nodemask(cp, oldmems, NULL);
		}
	}
+	NODEMASK_FREE(oldmems);
 }
 
 /*
@@ -2090,20 +2119,33 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 static int cpuset_track_online_nodes(struct notifier_block *self,
				unsigned long action, void *arg)
 {
+	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
+
+	if (oldmems == NULL)
+		return NOTIFY_DONE;
+
	cgroup_lock();
	switch (action) {
	case MEM_ONLINE:
-	case MEM_OFFLINE:
+		*oldmems = top_cpuset.mems_allowed;
		mutex_lock(&callback_mutex);
		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
		mutex_unlock(&callback_mutex);
-		if (action == MEM_OFFLINE)
-			scan_for_empty_cpusets(&top_cpuset);
+		update_tasks_nodemask(&top_cpuset, oldmems, NULL);
+		break;
+	case MEM_OFFLINE:
+		/*
+		 * needn't update top_cpuset.mems_allowed explicitly because
+		 * scan_for_empty_cpusets() will update it.
+		 */
+		scan_for_empty_cpusets(&top_cpuset);
		break;
	default:
		break;
	}
	cgroup_unlock();
+
+	NODEMASK_FREE(oldmems);
	return NOTIFY_OK;
 }
 #endif
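[Side note: nearly every cpuset.c hunk above performs the same conversion — on-stack nodemask_t variables become NODEMASK_ALLOC()/NODEMASK_FREE() pairs, since with a large CONFIG_NODES_SHIFT a nodemask_t can be hundreds of bytes, too big to keep piling onto the kernel stack. A minimal sketch of the pattern as the hunks use it, not a complete function; on configs with few nodes NODEMASK_ALLOC may degenerate to an ordinary local variable, which is why the result is still NULL-checked and dereferenced rather than used by value:]

	NODEMASK_ALLOC(nodemask_t, mask, GFP_KERNEL);	/* declares nodemask_t *mask */

	if (mask == NULL)
		return -ENOMEM;
	*mask = cs->mems_allowed;	/* note the deref: mask is a pointer now */
	/* ... use *mask ... */
	NODEMASK_FREE(mask);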
diff --git a/kernel/cred.c b/kernel/cred.c
index 1ed8ca18790c..62af1816c235 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -10,6 +10,7 @@
  */
 #include <linux/module.h>
 #include <linux/cred.h>
+#include <linux/slab.h>
 #include <linux/sched.h>
 #include <linux/key.h>
 #include <linux/keyctl.h>
@@ -364,7 +365,7 @@ struct cred *prepare_usermodehelper_creds(void)
 
	new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
	if (!new)
-		return NULL;
+		goto free_tgcred;
 
	kdebug("prepare_usermodehelper_creds() alloc %p", new);
 
@@ -398,6 +399,12 @@ struct cred *prepare_usermodehelper_creds(void)
 error:
	put_cred(new);
	return NULL;
+
+free_tgcred:
+#ifdef CONFIG_KEYS
+	kfree(tgcred);
+#endif
+	return NULL;
 }
 
 /*
@@ -786,8 +793,6 @@ bool creds_are_invalid(const struct cred *cred)
 {
	if (cred->magic != CRED_MAGIC)
		return true;
-	if (atomic_read(&cred->usage) < atomic_read(&cred->subscribers))
-		return true;
 #ifdef CONFIG_SECURITY_SELINUX
	if (selinux_is_enabled()) {
		if ((unsigned long) cred->security < PAGE_SIZE)
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c661bb78..31aa9332ef3f 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end)
	struct early_res *r;
	int i;
 
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
 try_next:
	i = find_overlapped_early(start, end);
	if (i >= max_early_res)
diff --git a/kernel/exit.c b/kernel/exit.c
index cce59cb5ee6a..7f2683a10ac4 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,7 +953,8 @@ NORET_TYPE void do_exit(long code)
 
	acct_update_integrals(tsk);
	/* sync mm's RSS info before statistics gathering */
-	sync_mm_rss(tsk, tsk->mm);
+	if (tsk->mm)
+		sync_mm_rss(tsk, tsk->mm);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
diff --git a/kernel/fork.c b/kernel/fork.c
index 4799c5f0e6d0..44b0791b0a2e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1052,6 +1052,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	p->prev_utime = cputime_zero;
	p->prev_stime = cputime_zero;
 #endif
+#if defined(SPLIT_RSS_COUNTING)
+	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
+#endif
 
	p->default_timer_slack_ns = current->timer_slack_ns;
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 42ec11b2af8a..b7091d5ca2f8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
		if (desc->chip->ack)
			desc->chip->ack(irq);
	}
+	desc->status |= IRQ_MASKED;
+}
+
+static inline void mask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->mask) {
+		desc->chip->mask(irq);
+		desc->status |= IRQ_MASKED;
+	}
+}
+
+static inline void unmask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->unmask) {
+		desc->chip->unmask(irq);
+		desc->status &= ~IRQ_MASKED;
+	}
 }
 
 /*
@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
	raw_spin_lock(&desc->lock);
	desc->status &= ~IRQ_INPROGRESS;
 
-	if (unlikely(desc->status & IRQ_ONESHOT))
-		desc->status |= IRQ_MASKED;
-	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
-		desc->chip->unmask(irq);
+	if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+		unmask_irq(desc, irq);
 out_unlock:
	raw_spin_unlock(&desc->lock);
 }
@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
	action = desc->action;
	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
		desc->status |= IRQ_PENDING;
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
+		mask_irq(desc, irq);
		goto out;
	}
 
@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
		irqreturn_t action_ret;
 
		if (unlikely(!action)) {
-			desc->chip->mask(irq);
+			mask_irq(desc, irq);
			goto out_unlock;
		}
 
@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
		if (unlikely((desc->status &
			      (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
			      (IRQ_PENDING | IRQ_MASKED))) {
-			desc->chip->unmask(irq);
-			desc->status &= ~IRQ_MASKED;
+			unmask_irq(desc, irq);
		}
 
		desc->status &= ~IRQ_PENDING;
@@ -716,7 +729,7 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
	__set_irq_handler(irq, handle, 0, name);
 }
 
-void __init set_irq_noprobe(unsigned int irq)
+void set_irq_noprobe(unsigned int irq)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
@@ -731,7 +744,7 @@ void __init set_irq_noprobe(unsigned int irq)
	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
-void __init set_irq_probe(unsigned int irq)
+void set_irq_probe(unsigned int irq)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb6078ca60c7..704e488730a5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,6 +382,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
+	unsigned long flags;
 
	if (!desc)
		return 0;
@@ -389,11 +390,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
	if (desc->status & IRQ_NOREQUEST)
		return 0;
 
+	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;
 
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
	return !action;
 }
 
@@ -483,8 +487,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  */
 static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
 {
+again:
	chip_bus_lock(irq, desc);
	raw_spin_lock_irq(&desc->lock);
+
+	/*
+	 * Implausible though it may be we need to protect us against
+	 * the following scenario:
+	 *
+	 * The thread is faster done than the hard interrupt handler
+	 * on the other CPU. If we unmask the irq line then the
+	 * interrupt can come in again and masks the line, leaves due
+	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 */
+	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+		raw_spin_unlock_irq(&desc->lock);
+		chip_bus_sync_unlock(irq, desc);
+		cpu_relax();
+		goto again;
+	}
+
	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
		desc->status &= ~IRQ_MASKED;
		desc->chip->unmask(irq);
@@ -735,6 +757,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
	if (new->flags & IRQF_ONESHOT)
		desc->status |= IRQ_ONESHOT;
 
+	/*
+	 * Force MSI interrupts to run with interrupts
+	 * disabled. The multi vector cards can cause stack
+	 * overflows due to nested interrupts when enough of
+	 * them are directed to a core and fire at the same
+	 * time.
+	 */
+	if (desc->msi_desc)
+		new->flags |= IRQF_DISABLED;
+
	if (!(desc->status & IRQ_NOAUTOEN)) {
		desc->depth = 0;
		desc->status &= ~IRQ_DISABLED;
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 963559dbd858..65d3845665ac 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 6f50eccc79c0..7a6eb04ef6b5 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/irq.h>
+#include <linux/gfp.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/interrupt.h>
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 8e5288a8a355..13aff293f4de 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -21,6 +21,7 @@
 #include <linux/sched.h>	/* for cond_resched */
 #include <linux/mm.h>
 #include <linux/ctype.h>
+#include <linux/slab.h>
 
 #include <asm/sections.h>
 
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 761fdd2b3034..11f3515ca83f 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -69,9 +69,16 @@ struct kgdb_state {
	struct pt_regs		*linux_regs;
 };
 
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE    0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP       0x8 /* CPU is single stepping */
+
 static struct debuggerinfo_struct {
	void			*debuggerinfo;
	struct task_struct	*task;
+	int			exception_state;
 } kgdb_info[NR_CPUS];
 
 /**
@@ -391,27 +398,22 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 
 /*
  * Copy the binary array pointed to by buf into mem. Fix $, #, and
- * 0x7d escaped with 0x7d. Return a pointer to the character after
- * the last byte written.
+ * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success.
+ * The input buf is overwitten with the result to write to mem.
  */
 static int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-	int err = 0;
-	char c;
+	int size = 0;
+	char *c = buf;
 
	while (count-- > 0) {
-		c = *buf++;
-		if (c == 0x7d)
-			c = *buf++ ^ 0x20;
-
-		err = probe_kernel_write(mem, &c, 1);
-		if (err)
-			break;
-
-		mem++;
+		c[size] = *buf++;
+		if (c[size] == 0x7d)
+			c[size] = *buf++ ^ 0x20;
+		size++;
	}
 
-	return err;
+	return probe_kernel_write(mem, c, size);
 }
 
 /*
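[Side note: the kgdb_ebin2mem() rewrite above decodes gdb's remote-protocol binary escaping in place and then writes the whole buffer with one probe_kernel_write() call instead of one call per byte. For reference, a self-contained userspace model of just the decode step; the escape rule (0x7d, then the next byte XORed with 0x20) comes from the hunk, while the harness around it is only illustrative:]

#include <stdio.h>

/* In-place decode is safe: the output is never longer than the input. */
static int ebin_decode(char *buf, int count)
{
	int size = 0;
	char *c = buf;

	while (count-- > 0) {		/* count = number of decoded bytes */
		c[size] = *buf++;
		if (c[size] == 0x7d)	/* escape: take next byte ^ 0x20 */
			c[size] = *buf++ ^ 0x20;
		size++;
	}
	return size;
}

int main(void)
{
	/* 0x7d 0x5d on the wire decodes back to a literal 0x7d byte */
	char pkt[] = { 'O', 'K', 0x7d, 0x5d, '!' };
	int n = ebin_decode(pkt, 4);	/* 4 decoded bytes expected */

	printf("%d bytes, pkt[2] = 0x%02x\n", n, (unsigned char)pkt[2]);
	return 0;
}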
@@ -563,49 +565,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
 }
 
 /*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
-	unsigned long flags;
-	int cpu;
-
-	local_irq_save(flags);
-	cpu = raw_smp_processor_id();
-	kgdb_info[cpu].debuggerinfo = regs;
-	kgdb_info[cpu].task = current;
-	/*
-	 * Make sure the above info reaches the primary CPU before
-	 * our cpu_in_kgdb[] flag setting does:
-	 */
-	smp_wmb();
-	atomic_set(&cpu_in_kgdb[cpu], 1);
-
-	/* Disable any cpu specific hw breakpoints */
-	kgdb_disable_hw_debug(regs);
-
-	/* Wait till primary CPU is done with debugging */
-	while (atomic_read(&passive_cpu_wait[cpu]))
-		cpu_relax();
-
-	kgdb_info[cpu].debuggerinfo = NULL;
-	kgdb_info[cpu].task = NULL;
-
-	/* fix up hardware debug registers on local cpu */
-	if (arch_kgdb_ops.correct_hw_break)
-		arch_kgdb_ops.correct_hw_break();
-
-	/* Signal the primary CPU that we are done: */
-	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog_sync();
-	clocksource_touch_watchdog();
-	local_irq_restore(flags);
-}
-#endif
-
-/*
  * Some architectures need cache flushes when we set/clear a
  * breakpoint:
  */
@@ -1400,34 +1359,13 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
	return 1;
 }
 
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- *	interface locks, if any (begin_session)
- *	kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
 {
-	struct kgdb_state kgdb_var;
-	struct kgdb_state *ks = &kgdb_var;
	unsigned long flags;
	int sstep_tries = 100;
	int error = 0;
	int i, cpu;
-
-	ks->cpu = raw_smp_processor_id();
-	ks->ex_vector = evector;
-	ks->signo = signo;
-	ks->ex_vector = evector;
-	ks->err_code = ecode;
-	ks->kgdb_usethreadid = 0;
-	ks->linux_regs = regs;
-
-	if (kgdb_reenter_check(ks))
-		return 0; /* Ouch, double exception ! */
-
+	int trace_on = 0;
 acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
@@ -1435,13 +1373,43 @@ acquirelock:
	 */
	local_irq_save(flags);
 
-	cpu = raw_smp_processor_id();
+	cpu = ks->cpu;
+	kgdb_info[cpu].debuggerinfo = regs;
+	kgdb_info[cpu].task = current;
+	/*
+	 * Make sure the above info reaches the primary CPU before
+	 * our cpu_in_kgdb[] flag setting does:
+	 */
+	atomic_inc(&cpu_in_kgdb[cpu]);
 
	/*
-	 * Acquire the kgdb_active lock:
+	 * CPU will loop if it is a slave or request to become a kgdb
+	 * master cpu and acquire the kgdb_active lock:
	 */
-	while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+	while (1) {
+		if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+			if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+				break;
+		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+			if (!atomic_read(&passive_cpu_wait[cpu]))
+				goto return_normal;
+		} else {
+return_normal:
+			/* Return to normal operation by executing any
+			 * hw breakpoint fixup.
+			 */
+			if (arch_kgdb_ops.correct_hw_break)
+				arch_kgdb_ops.correct_hw_break();
+			if (trace_on)
+				tracing_on();
+			atomic_dec(&cpu_in_kgdb[cpu]);
+			touch_softlockup_watchdog_sync();
+			clocksource_touch_watchdog();
+			local_irq_restore(flags);
+			return 0;
+		}
		cpu_relax();
+	}
 
	/*
	 * For single stepping, try to only enter on the processor
@@ -1475,9 +1443,6 @@ acquirelock:
	if (kgdb_io_ops->pre_exception)
		kgdb_io_ops->pre_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
-	kgdb_info[ks->cpu].task = current;
-
	kgdb_disable_hw_debug(ks->linux_regs);
 
	/*
@@ -1486,15 +1451,9 @@ acquirelock:
	 */
	if (!kgdb_single_step) {
		for (i = 0; i < NR_CPUS; i++)
-			atomic_set(&passive_cpu_wait[i], 1);
+			atomic_inc(&passive_cpu_wait[i]);
	}
 
-	/*
-	 * spin_lock code is good enough as a barrier so we don't
-	 * need one here:
-	 */
-	atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
 #ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1518,6 +1477,9 @@ acquirelock:
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
+	trace_on = tracing_is_on();
+	if (trace_on)
+		tracing_off();
 
	/* Talk to debugger with gdbserial protocol */
	error = gdb_serial_stub(ks);
@@ -1526,13 +1488,11 @@ acquirelock:
	if (kgdb_io_ops->post_exception)
		kgdb_io_ops->post_exception();
 
-	kgdb_info[ks->cpu].debuggerinfo = NULL;
-	kgdb_info[ks->cpu].task = NULL;
-	atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+	atomic_dec(&cpu_in_kgdb[ks->cpu]);
 
	if (!kgdb_single_step) {
		for (i = NR_CPUS-1; i >= 0; i--)
-			atomic_set(&passive_cpu_wait[i], 0);
+			atomic_dec(&passive_cpu_wait[i]);
		/*
		 * Wait till all the CPUs have quit
		 * from the debugger.
@@ -1551,6 +1511,8 @@ kgdb_restore:
		else
			kgdb_sstep_pid = 0;
	}
+	if (trace_on)
+		tracing_on();
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	touch_softlockup_watchdog_sync();
@@ -1560,13 +1522,52 @@ kgdb_restore:
	return error;
 }
 
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ *	interface locks, if any (begin_session)
+ *	kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+	int ret;
+
+	ks->cpu = raw_smp_processor_id();
+	ks->ex_vector = evector;
+	ks->signo = signo;
+	ks->ex_vector = evector;
+	ks->err_code = ecode;
+	ks->kgdb_usethreadid = 0;
+	ks->linux_regs = regs;
+
+	if (kgdb_reenter_check(ks))
+		return 0; /* Ouch, double exception ! */
+	kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+	ret = kgdb_cpu_enter(ks, regs);
+	kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
+	return ret;
+}
+
 int kgdb_nmicallback(int cpu, void *regs)
 {
 #ifdef CONFIG_SMP
+	struct kgdb_state kgdb_var;
+	struct kgdb_state *ks = &kgdb_var;
+
+	memset(ks, 0, sizeof(struct kgdb_state));
+	ks->cpu = cpu;
+	ks->linux_regs = regs;
+
	if (!atomic_read(&cpu_in_kgdb[cpu]) &&
-	    atomic_read(&kgdb_active) != cpu &&
-	    atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
-		kgdb_wait((struct pt_regs *)regs);
+	    atomic_read(&kgdb_active) != -1 &&
+	    atomic_read(&kgdb_active) != cpu) {
+		kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+		kgdb_cpu_enter(ks, regs);
+		kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
		return 0;
	}
 #endif
@@ -1742,11 +1743,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
  */
 void kgdb_breakpoint(void)
 {
-	atomic_set(&kgdb_setting_breakpoint, 1);
+	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
-	atomic_set(&kgdb_setting_breakpoint, 0);
+	atomic_dec(&kgdb_setting_breakpoint);
 }
 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
 
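[Side note: a pattern repeats through the kgdb.c hunks — atomic_set(&x, 1)/atomic_set(&x, 0) pairs become atomic_inc()/atomic_dec() (cpu_in_kgdb, passive_cpu_wait, kgdb_setting_breakpoint). A set/clear flag cannot tolerate overlapping users: whichever finishes first clears the flag for everyone still inside, while a counter only drops to zero when the last user leaves. A small userspace model of the difference, with C11 atomics standing in for the kernel's atomic_t:]

#include <stdatomic.h>
#include <stdio.h>

static atomic_int in_debugger;			/* counter, not a flag */

static void enter(void) { atomic_fetch_add(&in_debugger, 1); }
static void leave(void) { atomic_fetch_sub(&in_debugger, 1); }

int main(void)
{
	enter();				/* first user */
	enter();				/* overlapping second user */
	leave();				/* second user done */
	/* With set(1)/set(0) semantics the state would already read 0
	 * here, exposing the first user; the counter still reads 1. */
	printf("still busy: %d\n", atomic_load(&in_debugger));
	leave();
	return 0;
}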
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index fa034d29cf73..0ed46f3e51e9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -259,7 +259,8 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
	struct kprobe_insn_page *kip;
 
	list_for_each_entry(kip, &c->pages, list) {
-		long idx = ((long)slot - (long)kip->insns) / c->insn_size;
+		long idx = ((long)slot - (long)kip->insns) /
+				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 82ed0ea15194..83911c780175 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -219,7 +219,7 @@ int kthreadd(void *unused)
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
-	set_mems_allowed(node_possible_map);
+	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 
	current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
 
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index ca07c5c0c914..877fb306d415 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -56,7 +56,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/list.h>
-#include <linux/slab.h>
 #include <linux/stacktrace.h>
 
 static DEFINE_SPINLOCK(latency_lock);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 681bc2e1e187..2594e1ce41cb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,6 +43,7 @@
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+#include <linux/gfp.h>
 
 #include <asm/sections.h>
 
@@ -582,9 +583,6 @@ static int static_obj(void *obj)
	unsigned long start = (unsigned long) &_stext,
		      end   = (unsigned long) &_end,
		      addr  = (unsigned long) obj;
-#ifdef CONFIG_SMP
-	int i;
-#endif
 
	/*
	 * static variable?
@@ -595,24 +593,16 @@ static int static_obj(void *obj)
	if (arch_is_kernel_data(addr))
		return 1;
 
-#ifdef CONFIG_SMP
	/*
-	 * percpu var?
+	 * in-kernel percpu var?
	 */
-	for_each_possible_cpu(i) {
-		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
-			+ per_cpu_offset(i);
-
-		if ((addr >= start) && (addr < end))
-			return 1;
-	}
-#endif
+	if (is_kernel_percpu_address(addr))
+		return 1;
 
	/*
-	 * module var?
+	 * module static or percpu var?
	 */
-	return is_module_address(addr);
+	return is_module_address(addr) || is_module_percpu_address(addr);
 }
 
 /*
@@ -3211,8 +3201,6 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
	unsigned long flags;
 
-	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
-
	if (unlikely(current->lockdep_recursion))
		return;
 
@@ -3220,6 +3208,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
	check_flags(flags);
 
	current->lockdep_recursion = 1;
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
	__lock_acquire(lock, subclass, trylock, read, check,
		       irqs_disabled_flags(flags), nest_lock, ip, 0);
	current->lockdep_recursion = 0;
@@ -3232,14 +3221,13 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
	unsigned long flags;
 
-	trace_lock_release(lock, nested, ip);
-
	if (unlikely(current->lockdep_recursion))
		return;
 
	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
+	trace_lock_release(lock, nested, ip);
	__lock_release(lock, nested, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
@@ -3413,8 +3401,6 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
	unsigned long flags;
 
-	trace_lock_contended(lock, ip);
-
	if (unlikely(!lock_stat))
		return;
 
@@ -3424,6 +3410,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
+	trace_lock_contended(lock, ip);
	__lock_contended(lock, ip);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
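[Side note: all four lockdep hunks above make the same move — the trace_lock_*() tracepoints migrate from before the lockdep_recursion test to just after current->lockdep_recursion is set. As far as one can tell from the diff, the point is that a tracepoint handler can itself take locks and re-enter lockdep; firing it only once the recursion guard is up makes that re-entry harmless. Distilled shape of the fix, a paraphrase of the hunks rather than new code:]

	if (unlikely(current->lockdep_recursion))
		return;			/* guard checked first ... */

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	trace_lock_acquire(...);	/* ... tracepoint fires with the guard up */
	__lock_acquire(...);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);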
diff --git a/kernel/module.c b/kernel/module.c
index c968d3606dca..1016b75b026a 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,27 +370,33 @@ EXPORT_SYMBOL_GPL(find_module);
370 370
371#ifdef CONFIG_SMP 371#ifdef CONFIG_SMP
372 372
373static void *percpu_modalloc(unsigned long size, unsigned long align, 373static inline void __percpu *mod_percpu(struct module *mod)
374 const char *name)
375{ 374{
376 void *ptr; 375 return mod->percpu;
376}
377 377
378static int percpu_modalloc(struct module *mod,
379 unsigned long size, unsigned long align)
380{
378 if (align > PAGE_SIZE) { 381 if (align > PAGE_SIZE) {
379 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", 382 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
380 name, align, PAGE_SIZE); 383 mod->name, align, PAGE_SIZE);
381 align = PAGE_SIZE; 384 align = PAGE_SIZE;
382 } 385 }
383 386
384 ptr = __alloc_reserved_percpu(size, align); 387 mod->percpu = __alloc_reserved_percpu(size, align);
385 if (!ptr) 388 if (!mod->percpu) {
386 printk(KERN_WARNING 389 printk(KERN_WARNING
387 "Could not allocate %lu bytes percpu data\n", size); 390 "Could not allocate %lu bytes percpu data\n", size);
388 return ptr; 391 return -ENOMEM;
392 }
393 mod->percpu_size = size;
394 return 0;
389} 395}
390 396
391static void percpu_modfree(void *freeme) 397static void percpu_modfree(struct module *mod)
392{ 398{
393 free_percpu(freeme); 399 free_percpu(mod->percpu);
394} 400}
395 401
396static unsigned int find_pcpusec(Elf_Ehdr *hdr, 402static unsigned int find_pcpusec(Elf_Ehdr *hdr,
@@ -400,24 +406,62 @@ static unsigned int find_pcpusec(Elf_Ehdr *hdr,
400 return find_sec(hdr, sechdrs, secstrings, ".data.percpu"); 406 return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
401} 407}
402 408
403static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size) 409static void percpu_modcopy(struct module *mod,
410 const void *from, unsigned long size)
404{ 411{
405 int cpu; 412 int cpu;
406 413
407 for_each_possible_cpu(cpu) 414 for_each_possible_cpu(cpu)
408 memcpy(pcpudest + per_cpu_offset(cpu), from, size); 415 memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
416}
417
418/**
419 * is_module_percpu_address - test whether address is from module static percpu
420 * @addr: address to test
421 *
422 * Test whether @addr belongs to module static percpu area.
423 *
424 * RETURNS:
425 * %true if @addr is from module static percpu area
426 */
427bool is_module_percpu_address(unsigned long addr)
428{
429 struct module *mod;
430 unsigned int cpu;
431
432 preempt_disable();
433
434 list_for_each_entry_rcu(mod, &modules, list) {
435 if (!mod->percpu_size)
436 continue;
437 for_each_possible_cpu(cpu) {
438 void *start = per_cpu_ptr(mod->percpu, cpu);
439
440 if ((void *)addr >= start &&
441 (void *)addr < start + mod->percpu_size) {
442 preempt_enable();
443 return true;
444 }
445 }
446 }
447
448 preempt_enable();
449 return false;
409} 450}
410 451
411#else /* ... !CONFIG_SMP */ 452#else /* ... !CONFIG_SMP */
412 453
413static inline void *percpu_modalloc(unsigned long size, unsigned long align, 454static inline void __percpu *mod_percpu(struct module *mod)
414 const char *name)
415{ 455{
416 return NULL; 456 return NULL;
417} 457}
418static inline void percpu_modfree(void *pcpuptr) 458static inline int percpu_modalloc(struct module *mod,
459 unsigned long size, unsigned long align)
460{
461 return -ENOMEM;
462}
463static inline void percpu_modfree(struct module *mod)
419{ 464{
420 BUG();
421} 465}
422static inline unsigned int find_pcpusec(Elf_Ehdr *hdr, 466static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
423 Elf_Shdr *sechdrs, 467 Elf_Shdr *sechdrs,
@@ -425,12 +469,16 @@ static inline unsigned int find_pcpusec(Elf_Ehdr *hdr,
425{ 469{
426 return 0; 470 return 0;
427} 471}
428static inline void percpu_modcopy(void *pcpudst, const void *src, 472static inline void percpu_modcopy(struct module *mod,
429 unsigned long size) 473 const void *from, unsigned long size)
430{ 474{
431 /* pcpusec should be 0, and size of that section should be 0. */ 475 /* pcpusec should be 0, and size of that section should be 0. */
432 BUG_ON(size != 0); 476 BUG_ON(size != 0);
433} 477}
478bool is_module_percpu_address(unsigned long addr)
479{
480 return false;
481}
434 482
435#endif /* CONFIG_SMP */ 483#endif /* CONFIG_SMP */
436 484
@@ -473,11 +521,13 @@ static void module_unload_init(struct module *mod)
473 int cpu; 521 int cpu;
474 522
475 INIT_LIST_HEAD(&mod->modules_which_use_me); 523 INIT_LIST_HEAD(&mod->modules_which_use_me);
476 for_each_possible_cpu(cpu) 524 for_each_possible_cpu(cpu) {
477 per_cpu_ptr(mod->refptr, cpu)->count = 0; 525 per_cpu_ptr(mod->refptr, cpu)->incs = 0;
526 per_cpu_ptr(mod->refptr, cpu)->decs = 0;
527 }
478 528
479 /* Hold reference count during initialization. */ 529 /* Hold reference count during initialization. */
480 __this_cpu_write(mod->refptr->count, 1); 530 __this_cpu_write(mod->refptr->incs, 1);
481 /* Backwards compatibility macros put refcount during init. */ 531 /* Backwards compatibility macros put refcount during init. */
482 mod->waiter = current; 532 mod->waiter = current;
483} 533}
@@ -616,12 +666,28 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
616 666
617unsigned int module_refcount(struct module *mod) 667unsigned int module_refcount(struct module *mod)
618{ 668{
619 unsigned int total = 0; 669 unsigned int incs = 0, decs = 0;
620 int cpu; 670 int cpu;
621 671
622 for_each_possible_cpu(cpu) 672 for_each_possible_cpu(cpu)
623 total += per_cpu_ptr(mod->refptr, cpu)->count; 673 decs += per_cpu_ptr(mod->refptr, cpu)->decs;
624 return total; 674 /*
675 * ensure the incs are added up after the decs.
676 * module_put ensures incs are visible before decs with smp_wmb.
677 *
678 * This 2-count scheme avoids the situation where the refcount
679 * for CPU0 is read, then CPU0 increments the module refcount,
680 * then CPU1 drops that refcount, then the refcount for CPU1 is
681 * read. We would record a decrement but not its corresponding
682 * increment so we would see a low count (disaster).
683 *
684 * Rare situation? But module_refcount can be preempted, and we
685 * might be tallying up 4096+ CPUs. So it is not impossible.
686 */
687 smp_rmb();
688 for_each_possible_cpu(cpu)
689 incs += per_cpu_ptr(mod->refptr, cpu)->incs;
690 return incs - decs;
625} 691}
626EXPORT_SYMBOL(module_refcount); 692EXPORT_SYMBOL(module_refcount);
627 693
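The comment above is the whole argument: module_put() (next hunk) publishes its inc with smp_wmb() before recording the dec, and module_refcount() sums the decs first, issues smp_rmb(), then sums the incs, so no dec can ever be observed without its matching inc. A compressed sketch of the reader's two-pass sum, with plain arrays standing in for the per-CPU counters:

/*
 * Sketch only: incs[]/decs[] stand in for the per-CPU module_ref
 * fields. In the kernel an smp_rmb() sits between the two loops,
 * pairing with the smp_wmb() in module_put().
 */
static unsigned int refcount_snapshot(const unsigned int *incs,
                                      const unsigned int *decs, int ncpus)
{
        unsigned int i = 0, d = 0;
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++)
                d += decs[cpu];
        /* smp_rmb(); -- decs summed strictly before incs */
        for (cpu = 0; cpu < ncpus; cpu++)
                i += incs[cpu];
        return i - d;   /* may overestimate briefly, never underflows */
}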
@@ -798,10 +864,11 @@ void module_put(struct module *module)
798{ 864{
799 if (module) { 865 if (module) {
800 preempt_disable(); 866 preempt_disable();
801 __this_cpu_dec(module->refptr->count); 867 smp_wmb(); /* see comment in module_refcount */
868 __this_cpu_inc(module->refptr->decs);
802 869
803 trace_module_put(module, _RET_IP_, 870 trace_module_put(module, _RET_IP_,
804 __this_cpu_read(module->refptr->count)); 871 __this_cpu_read(module->refptr->decs));
805 /* Maybe they're waiting for us to drop reference? */ 872 /* Maybe they're waiting for us to drop reference? */
806 if (unlikely(!module_is_live(module))) 873 if (unlikely(!module_is_live(module)))
807 wake_up_process(module->waiter); 874 wake_up_process(module->waiter);
@@ -1400,8 +1467,7 @@ static void free_module(struct module *mod)
1400 /* This may be NULL, but that's OK */ 1467 /* This may be NULL, but that's OK */
1401 module_free(mod, mod->module_init); 1468 module_free(mod, mod->module_init);
1402 kfree(mod->args); 1469 kfree(mod->args);
1403 if (mod->percpu) 1470 percpu_modfree(mod);
1404 percpu_modfree(mod->percpu);
1405#if defined(CONFIG_MODULE_UNLOAD) 1471#if defined(CONFIG_MODULE_UNLOAD)
1406 if (mod->refptr) 1472 if (mod->refptr)
1407 free_percpu(mod->refptr); 1473 free_percpu(mod->refptr);
@@ -1520,7 +1586,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
1520 default: 1586 default:
1521 /* Divert to percpu allocation if a percpu var. */ 1587 /* Divert to percpu allocation if a percpu var. */
1522 if (sym[i].st_shndx == pcpuindex) 1588 if (sym[i].st_shndx == pcpuindex)
1523 secbase = (unsigned long)mod->percpu; 1589 secbase = (unsigned long)mod_percpu(mod);
1524 else 1590 else
1525 secbase = sechdrs[sym[i].st_shndx].sh_addr; 1591 secbase = sechdrs[sym[i].st_shndx].sh_addr;
1526 sym[i].st_value += secbase; 1592 sym[i].st_value += secbase;
@@ -1954,7 +2020,7 @@ static noinline struct module *load_module(void __user *umod,
1954 unsigned int modindex, versindex, infoindex, pcpuindex; 2020 unsigned int modindex, versindex, infoindex, pcpuindex;
1955 struct module *mod; 2021 struct module *mod;
1956 long err = 0; 2022 long err = 0;
1957 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 2023 void *ptr = NULL; /* Stops spurious gcc warning */
1958 unsigned long symoffs, stroffs, *strmap; 2024 unsigned long symoffs, stroffs, *strmap;
1959 2025
1960 mm_segment_t old_fs; 2026 mm_segment_t old_fs;
@@ -2094,15 +2160,11 @@ static noinline struct module *load_module(void __user *umod,
2094 2160
2095 if (pcpuindex) { 2161 if (pcpuindex) {
2096 /* We have a special allocation for this section. */ 2162 /* We have a special allocation for this section. */
2097 percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size, 2163 err = percpu_modalloc(mod, sechdrs[pcpuindex].sh_size,
2098 sechdrs[pcpuindex].sh_addralign, 2164 sechdrs[pcpuindex].sh_addralign);
2099 mod->name); 2165 if (err)
2100 if (!percpu) {
2101 err = -ENOMEM;
2102 goto free_mod; 2166 goto free_mod;
2103 }
2104 sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC; 2167 sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
2105 mod->percpu = percpu;
2106 } 2168 }
2107 2169
2108 /* Determine total sizes, and put offsets in sh_entsize. For now 2170 /* Determine total sizes, and put offsets in sh_entsize. For now
@@ -2317,7 +2379,7 @@ static noinline struct module *load_module(void __user *umod,
2317 sort_extable(mod->extable, mod->extable + mod->num_exentries); 2379 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2318 2380
2319 /* Finally, copy percpu area over. */ 2381 /* Finally, copy percpu area over. */
2320 percpu_modcopy(mod->percpu, (void *)sechdrs[pcpuindex].sh_addr, 2382 percpu_modcopy(mod, (void *)sechdrs[pcpuindex].sh_addr,
2321 sechdrs[pcpuindex].sh_size); 2383 sechdrs[pcpuindex].sh_size);
2322 2384
2323 add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex, 2385 add_kallsyms(mod, sechdrs, hdr->e_shnum, symindex, strindex,
@@ -2409,8 +2471,7 @@ static noinline struct module *load_module(void __user *umod,
2409 module_free(mod, mod->module_core); 2471 module_free(mod, mod->module_core);
2410 /* mod will be freed with core. Don't access it beyond this line! */ 2472 /* mod will be freed with core. Don't access it beyond this line! */
2411 free_percpu: 2473 free_percpu:
2412 if (percpu) 2474 percpu_modfree(mod);
2413 percpu_modfree(percpu);
2414 free_mod: 2475 free_mod:
2415 kfree(args); 2476 kfree(args);
2416 kfree(strmap); 2477 kfree(strmap);
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 2ab67233ee8f..f74e6c00e26d 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -13,6 +13,7 @@
13 * Pavel Emelianov <xemul@openvz.org> 13 * Pavel Emelianov <xemul@openvz.org>
14 */ 14 */
15 15
16#include <linux/slab.h>
16#include <linux/module.h> 17#include <linux/module.h>
17#include <linux/nsproxy.h> 18#include <linux/nsproxy.h>
18#include <linux/init_task.h> 19#include <linux/init_task.h>
diff --git a/kernel/padata.c b/kernel/padata.c
index 93caf65ff57c..fd03513c7327 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -25,6 +25,7 @@
25#include <linux/padata.h> 25#include <linux/padata.h>
26#include <linux/mutex.h> 26#include <linux/mutex.h>
27#include <linux/sched.h> 27#include <linux/sched.h>
28#include <linux/slab.h>
28#include <linux/rcupdate.h> 29#include <linux/rcupdate.h>
29 30
30#define MAX_SEQ_NR INT_MAX - NR_CPUS 31#define MAX_SEQ_NR INT_MAX - NR_CPUS
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 4393b9e73740..3d1552d3c12b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -15,6 +15,7 @@
15#include <linux/smp.h> 15#include <linux/smp.h>
16#include <linux/file.h> 16#include <linux/file.h>
17#include <linux/poll.h> 17#include <linux/poll.h>
18#include <linux/slab.h>
18#include <linux/sysfs.h> 19#include <linux/sysfs.h>
19#include <linux/dcache.h> 20#include <linux/dcache.h>
20#include <linux/percpu.h> 21#include <linux/percpu.h>
@@ -81,10 +82,6 @@ extern __weak const struct pmu *hw_perf_event_init(struct perf_event *event)
81void __weak hw_perf_disable(void) { barrier(); } 82void __weak hw_perf_disable(void) { barrier(); }
82void __weak hw_perf_enable(void) { barrier(); } 83void __weak hw_perf_enable(void) { barrier(); }
83 84
84void __weak hw_perf_event_setup(int cpu) { barrier(); }
85void __weak hw_perf_event_setup_online(int cpu) { barrier(); }
86void __weak hw_perf_event_setup_offline(int cpu) { barrier(); }
87
88int __weak 85int __weak
89hw_perf_group_sched_in(struct perf_event *group_leader, 86hw_perf_group_sched_in(struct perf_event *group_leader,
90 struct perf_cpu_context *cpuctx, 87 struct perf_cpu_context *cpuctx,
@@ -97,25 +94,15 @@ void __weak perf_event_print_debug(void) { }
97 94
98static DEFINE_PER_CPU(int, perf_disable_count); 95static DEFINE_PER_CPU(int, perf_disable_count);
99 96
100void __perf_disable(void)
101{
102 __get_cpu_var(perf_disable_count)++;
103}
104
105bool __perf_enable(void)
106{
107 return !--__get_cpu_var(perf_disable_count);
108}
109
110void perf_disable(void) 97void perf_disable(void)
111{ 98{
112 __perf_disable(); 99 if (!__get_cpu_var(perf_disable_count)++)
113 hw_perf_disable(); 100 hw_perf_disable();
114} 101}
115 102
116void perf_enable(void) 103void perf_enable(void)
117{ 104{
118 if (__perf_enable()) 105 if (!--__get_cpu_var(perf_disable_count))
119 hw_perf_enable(); 106 hw_perf_enable();
120} 107}
121 108
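Folding __perf_disable()/__perf_enable() into their only callers leaves the classic nesting counter: only the outermost disable and the matching outermost enable touch the hardware. A runnable toy version of the pattern, with the per-CPU state reduced to a single int:

#include <stdio.h>

static int disable_count;       /* DEFINE_PER_CPU(int, ...) in the kernel */

static void hw_disable(void) { puts("hw disabled"); }
static void hw_enable(void)  { puts("hw enabled"); }

static void pmu_disable(void)
{
        if (!disable_count++)   /* 0 -> 1: first caller really disables */
                hw_disable();
}

static void pmu_enable(void)
{
        if (!--disable_count)   /* 1 -> 0: last caller really re-enables */
                hw_enable();
}

int main(void)
{
        pmu_disable();          /* prints "hw disabled" */
        pmu_disable();          /* nested: no hardware access */
        pmu_enable();
        pmu_enable();           /* prints "hw enabled" */
        return 0;
}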
@@ -1178,11 +1165,9 @@ void perf_event_task_sched_out(struct task_struct *task,
1178 struct perf_event_context *ctx = task->perf_event_ctxp; 1165 struct perf_event_context *ctx = task->perf_event_ctxp;
1179 struct perf_event_context *next_ctx; 1166 struct perf_event_context *next_ctx;
1180 struct perf_event_context *parent; 1167 struct perf_event_context *parent;
1181 struct pt_regs *regs;
1182 int do_switch = 1; 1168 int do_switch = 1;
1183 1169
1184 regs = task_pt_regs(task); 1170 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1185 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
1186 1171
1187 if (likely(!ctx || !cpuctx->task_ctx)) 1172 if (likely(!ctx || !cpuctx->task_ctx))
1188 return; 1173 return;
@@ -1538,12 +1523,15 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1538 */ 1523 */
1539 if (interrupts == MAX_INTERRUPTS) { 1524 if (interrupts == MAX_INTERRUPTS) {
1540 perf_log_throttle(event, 1); 1525 perf_log_throttle(event, 1);
1526 perf_disable();
1541 event->pmu->unthrottle(event); 1527 event->pmu->unthrottle(event);
1528 perf_enable();
1542 } 1529 }
1543 1530
1544 if (!event->attr.freq || !event->attr.sample_freq) 1531 if (!event->attr.freq || !event->attr.sample_freq)
1545 continue; 1532 continue;
1546 1533
1534 perf_disable();
1547 event->pmu->read(event); 1535 event->pmu->read(event);
1548 now = atomic64_read(&event->count); 1536 now = atomic64_read(&event->count);
1549 delta = now - hwc->freq_count_stamp; 1537 delta = now - hwc->freq_count_stamp;
@@ -1551,6 +1539,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1551 1539
1552 if (delta > 0) 1540 if (delta > 0)
1553 perf_adjust_period(event, TICK_NSEC, delta); 1541 perf_adjust_period(event, TICK_NSEC, delta);
1542 perf_enable();
1554 } 1543 }
1555 raw_spin_unlock(&ctx->lock); 1544 raw_spin_unlock(&ctx->lock);
1556} 1545}
@@ -1560,9 +1549,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
1560 */ 1549 */
1561static void rotate_ctx(struct perf_event_context *ctx) 1550static void rotate_ctx(struct perf_event_context *ctx)
1562{ 1551{
1563 if (!ctx->nr_events)
1564 return;
1565
1566 raw_spin_lock(&ctx->lock); 1552 raw_spin_lock(&ctx->lock);
1567 1553
1568 /* Rotate the first entry last of non-pinned groups */ 1554 /* Rotate the first entry last of non-pinned groups */
@@ -1575,19 +1561,28 @@ void perf_event_task_tick(struct task_struct *curr)
1575{ 1561{
1576 struct perf_cpu_context *cpuctx; 1562 struct perf_cpu_context *cpuctx;
1577 struct perf_event_context *ctx; 1563 struct perf_event_context *ctx;
1564 int rotate = 0;
1578 1565
1579 if (!atomic_read(&nr_events)) 1566 if (!atomic_read(&nr_events))
1580 return; 1567 return;
1581 1568
1582 cpuctx = &__get_cpu_var(perf_cpu_context); 1569 cpuctx = &__get_cpu_var(perf_cpu_context);
1583 ctx = curr->perf_event_ctxp; 1570 if (cpuctx->ctx.nr_events &&
1571 cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
1572 rotate = 1;
1584 1573
1585 perf_disable(); 1574 ctx = curr->perf_event_ctxp;
1575 if (ctx && ctx->nr_events && ctx->nr_events != ctx->nr_active)
1576 rotate = 1;
1586 1577
1587 perf_ctx_adjust_freq(&cpuctx->ctx); 1578 perf_ctx_adjust_freq(&cpuctx->ctx);
1588 if (ctx) 1579 if (ctx)
1589 perf_ctx_adjust_freq(ctx); 1580 perf_ctx_adjust_freq(ctx);
1590 1581
1582 if (!rotate)
1583 return;
1584
1585 perf_disable();
1591 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 1586 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
1592 if (ctx) 1587 if (ctx)
1593 task_ctx_sched_out(ctx, EVENT_FLEXIBLE); 1588 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1599,7 +1594,6 @@ void perf_event_task_tick(struct task_struct *curr)
1599 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE); 1594 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
1600 if (ctx) 1595 if (ctx)
1601 task_ctx_sched_in(curr, EVENT_FLEXIBLE); 1596 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
1602
1603 perf_enable(); 1597 perf_enable();
1604} 1598}
1605 1599
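The rotate flag makes the tick cheap in the common case: a context only needs rotating when it is multiplexing, that is, when it has more events than could be scheduled at once (nr_events != nr_active). A one-line sketch of that predicate; the struct fields mirror perf_event_context, the rest is hypothetical:

struct ctx { int nr_events; int nr_active; };

/* Rotation only buys anything when some events had to sit out. */
static int needs_rotation(const struct ctx *c)
{
        return c && c->nr_events && c->nr_events != c->nr_active;
}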
@@ -2791,6 +2785,12 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2791 return NULL; 2785 return NULL;
2792} 2786}
2793 2787
2788__weak
2789void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
2790{
2791}
2792
2793
2794/* 2794/*
2795 * Output 2795 * Output
2796 */ 2796 */
@@ -3376,15 +3376,23 @@ static void perf_event_task_output(struct perf_event *event,
3376 struct perf_task_event *task_event) 3376 struct perf_task_event *task_event)
3377{ 3377{
3378 struct perf_output_handle handle; 3378 struct perf_output_handle handle;
3379 int size;
3380 struct task_struct *task = task_event->task; 3379 struct task_struct *task = task_event->task;
3381 int ret; 3380 unsigned long flags;
3381 int size, ret;
3382
3383 /*
3384 * If this CPU attempts to acquire an rq lock held by a CPU spinning
3385 * in perf_output_lock() from interrupt context, it's game over.
3386 */
3387 local_irq_save(flags);
3382 3388
3383 size = task_event->event_id.header.size; 3389 size = task_event->event_id.header.size;
3384 ret = perf_output_begin(&handle, event, size, 0, 0); 3390 ret = perf_output_begin(&handle, event, size, 0, 0);
3385 3391
3386 if (ret) 3392 if (ret) {
3393 local_irq_restore(flags);
3387 return; 3394 return;
3395 }
3388 3396
3389 task_event->event_id.pid = perf_event_pid(event, task); 3397 task_event->event_id.pid = perf_event_pid(event, task);
3390 task_event->event_id.ppid = perf_event_pid(event, current); 3398 task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3395,6 +3403,7 @@ static void perf_event_task_output(struct perf_event *event,
3395 perf_output_put(&handle, task_event->event_id); 3403 perf_output_put(&handle, task_event->event_id);
3396 3404
3397 perf_output_end(&handle); 3405 perf_output_end(&handle);
3406 local_irq_restore(flags);
3398} 3407}
3399 3408
3400static int perf_event_task_match(struct perf_event *event) 3409static int perf_event_task_match(struct perf_event *event)
@@ -4318,9 +4327,8 @@ static const struct pmu perf_ops_task_clock = {
4318#ifdef CONFIG_EVENT_TRACING 4327#ifdef CONFIG_EVENT_TRACING
4319 4328
4320void perf_tp_event(int event_id, u64 addr, u64 count, void *record, 4329void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4321 int entry_size) 4330 int entry_size, struct pt_regs *regs)
4322{ 4331{
4323 struct pt_regs *regs = get_irq_regs();
4324 struct perf_sample_data data; 4332 struct perf_sample_data data;
4325 struct perf_raw_record raw = { 4333 struct perf_raw_record raw = {
4326 .size = entry_size, 4334 .size = entry_size,
@@ -4330,12 +4338,9 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
4330 perf_sample_data_init(&data, addr); 4338 perf_sample_data_init(&data, addr);
4331 data.raw = &raw; 4339 data.raw = &raw;
4332 4340
4333 if (!regs)
4334 regs = task_pt_regs(current);
4335
4336 /* Trace events already protected against recursion */ 4341 /* Trace events already protected against recursion */
4337 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, 4342 do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
4338 &data, regs); 4343 &data, regs);
4339} 4344}
4340EXPORT_SYMBOL_GPL(perf_tp_event); 4345EXPORT_SYMBOL_GPL(perf_tp_event);
4341 4346
@@ -4351,7 +4356,7 @@ static int perf_tp_event_match(struct perf_event *event,
4351 4356
4352static void tp_perf_event_destroy(struct perf_event *event) 4357static void tp_perf_event_destroy(struct perf_event *event)
4353{ 4358{
4354 ftrace_profile_disable(event->attr.config); 4359 perf_trace_disable(event->attr.config);
4355} 4360}
4356 4361
4357static const struct pmu *tp_perf_event_init(struct perf_event *event) 4362static const struct pmu *tp_perf_event_init(struct perf_event *event)
@@ -4365,7 +4370,7 @@ static const struct pmu *tp_perf_event_init(struct perf_event *event)
4365 !capable(CAP_SYS_ADMIN)) 4370 !capable(CAP_SYS_ADMIN))
4366 return ERR_PTR(-EPERM); 4371 return ERR_PTR(-EPERM);
4367 4372
4368 if (ftrace_profile_enable(event->attr.config)) 4373 if (perf_trace_enable(event->attr.config))
4369 return NULL; 4374 return NULL;
4370 4375
4371 event->destroy = tp_perf_event_destroy; 4376 event->destroy = tp_perf_event_destroy;
@@ -4892,7 +4897,7 @@ err_fput_free_put_context:
4892 4897
4893err_free_put_context: 4898err_free_put_context:
4894 if (err < 0) 4899 if (err < 0)
4895 kfree(event); 4900 free_event(event);
4896 4901
4897err_put_context: 4902err_put_context:
4898 if (err < 0) 4903 if (err < 0)
@@ -5372,18 +5377,26 @@ int perf_event_init_task(struct task_struct *child)
5372 return ret; 5377 return ret;
5373} 5378}
5374 5379
5380static void __init perf_event_init_all_cpus(void)
5381{
5382 int cpu;
5383 struct perf_cpu_context *cpuctx;
5384
5385 for_each_possible_cpu(cpu) {
5386 cpuctx = &per_cpu(perf_cpu_context, cpu);
5387 __perf_event_init_context(&cpuctx->ctx, NULL);
5388 }
5389}
5390
5375static void __cpuinit perf_event_init_cpu(int cpu) 5391static void __cpuinit perf_event_init_cpu(int cpu)
5376{ 5392{
5377 struct perf_cpu_context *cpuctx; 5393 struct perf_cpu_context *cpuctx;
5378 5394
5379 cpuctx = &per_cpu(perf_cpu_context, cpu); 5395 cpuctx = &per_cpu(perf_cpu_context, cpu);
5380 __perf_event_init_context(&cpuctx->ctx, NULL);
5381 5396
5382 spin_lock(&perf_resource_lock); 5397 spin_lock(&perf_resource_lock);
5383 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu; 5398 cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
5384 spin_unlock(&perf_resource_lock); 5399 spin_unlock(&perf_resource_lock);
5385
5386 hw_perf_event_setup(cpu);
5387} 5400}
5388 5401
5389#ifdef CONFIG_HOTPLUG_CPU 5402#ifdef CONFIG_HOTPLUG_CPU
@@ -5423,20 +5436,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
5423 perf_event_init_cpu(cpu); 5436 perf_event_init_cpu(cpu);
5424 break; 5437 break;
5425 5438
5426 case CPU_ONLINE:
5427 case CPU_ONLINE_FROZEN:
5428 hw_perf_event_setup_online(cpu);
5429 break;
5430
5431 case CPU_DOWN_PREPARE: 5439 case CPU_DOWN_PREPARE:
5432 case CPU_DOWN_PREPARE_FROZEN: 5440 case CPU_DOWN_PREPARE_FROZEN:
5433 perf_event_exit_cpu(cpu); 5441 perf_event_exit_cpu(cpu);
5434 break; 5442 break;
5435 5443
5436 case CPU_DEAD:
5437 hw_perf_event_setup_offline(cpu);
5438 break;
5439
5440 default: 5444 default:
5441 break; 5445 break;
5442 } 5446 }
@@ -5454,6 +5458,7 @@ static struct notifier_block __cpuinitdata perf_cpu_nb = {
5454 5458
5455void __init perf_event_init(void) 5459void __init perf_event_init(void)
5456{ 5460{
5461 perf_event_init_all_cpus();
5457 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE, 5462 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
5458 (void *)(long)smp_processor_id()); 5463 (void *)(long)smp_processor_id());
5459 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE, 5464 perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 79aac93acf99..a5aff94e1f0b 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -13,6 +13,7 @@
13#include <linux/syscalls.h> 13#include <linux/syscalls.h>
14#include <linux/err.h> 14#include <linux/err.h>
15#include <linux/acct.h> 15#include <linux/acct.h>
16#include <linux/slab.h>
16 17
17#define BITS_PER_PAGE (PAGE_SIZE*8) 18#define BITS_PER_PAGE (PAGE_SIZE*8)
18 19
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 564b3b0240dd..799f360d1475 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -997,9 +997,9 @@ static void check_thread_timers(struct task_struct *tsk,
997 } 997 }
998} 998}
999 999
1000static void stop_process_timers(struct task_struct *tsk) 1000static void stop_process_timers(struct signal_struct *sig)
1001{ 1001{
1002 struct thread_group_cputimer *cputimer = &tsk->signal->cputimer; 1002 struct thread_group_cputimer *cputimer = &sig->cputimer;
1003 unsigned long flags; 1003 unsigned long flags;
1004 1004
1005 if (!cputimer->running) 1005 if (!cputimer->running)
@@ -1008,6 +1008,10 @@ static void stop_process_timers(struct task_struct *tsk)
1008 spin_lock_irqsave(&cputimer->lock, flags); 1008 spin_lock_irqsave(&cputimer->lock, flags);
1009 cputimer->running = 0; 1009 cputimer->running = 0;
1010 spin_unlock_irqrestore(&cputimer->lock, flags); 1010 spin_unlock_irqrestore(&cputimer->lock, flags);
1011
1012 sig->cputime_expires.prof_exp = cputime_zero;
1013 sig->cputime_expires.virt_exp = cputime_zero;
1014 sig->cputime_expires.sched_exp = 0;
1011} 1015}
1012 1016
1013static u32 onecputick; 1017static u32 onecputick;
@@ -1069,7 +1073,7 @@ static void check_process_timers(struct task_struct *tsk,
1069 list_empty(&timers[CPUCLOCK_VIRT]) && 1073 list_empty(&timers[CPUCLOCK_VIRT]) &&
1070 cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) && 1074 cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
1071 list_empty(&timers[CPUCLOCK_SCHED])) { 1075 list_empty(&timers[CPUCLOCK_SCHED])) {
1072 stop_process_timers(tsk); 1076 stop_process_timers(sig);
1073 return; 1077 return;
1074 } 1078 }
1075 1079
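Besides clearing the running flag, stop_process_timers() now also wipes the cached expiry values, so later expiry checks cannot act on stale expirations after the last group timer is gone. A minimal sketch of the reset; field names follow the patch, types are simplified:

struct expires { unsigned long prof_exp, virt_exp, sched_exp; };

static void stop_timers(int *running, struct expires *e)
{
        *running = 0;           /* cputimer->running = 0, under the lock */
        e->prof_exp  = 0;       /* cputime_zero in the kernel */
        e->virt_exp  = 0;
        e->sched_exp = 0;
}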
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index da5288ec2392..aa9e916da4d5 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -22,6 +22,7 @@
22#include <linux/console.h> 22#include <linux/console.h>
23#include <linux/cpu.h> 23#include <linux/cpu.h>
24#include <linux/freezer.h> 24#include <linux/freezer.h>
25#include <linux/gfp.h>
25#include <scsi/scsi_scan.h> 26#include <scsi/scsi_scan.h>
26#include <asm/suspend.h> 27#include <asm/suspend.h>
27 28
diff --git a/kernel/power/hibernate_nvs.c b/kernel/power/hibernate_nvs.c
index 39ac698ef836..fdcad9ed5a7b 100644
--- a/kernel/power/hibernate_nvs.c
+++ b/kernel/power/hibernate_nvs.c
@@ -10,6 +10,7 @@
10#include <linux/kernel.h> 10#include <linux/kernel.h>
11#include <linux/list.h> 11#include <linux/list.h>
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/slab.h>
13#include <linux/suspend.h> 14#include <linux/suspend.h>
14 15
15/* 16/*
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5ade1bdcf366..71ae29052ab6 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -88,12 +88,11 @@ static int try_to_freeze_tasks(bool sig_only)
88 printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds " 88 printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
89 "(%d tasks refusing to freeze):\n", 89 "(%d tasks refusing to freeze):\n",
90 elapsed_csecs / 100, elapsed_csecs % 100, todo); 90 elapsed_csecs / 100, elapsed_csecs % 100, todo);
91 show_state();
92 read_lock(&tasklist_lock); 91 read_lock(&tasklist_lock);
93 do_each_thread(g, p) { 92 do_each_thread(g, p) {
94 task_lock(p); 93 task_lock(p);
95 if (freezing(p) && !freezer_should_skip(p)) 94 if (freezing(p) && !freezer_should_skip(p))
96 printk(KERN_ERR " %s\n", p->comm); 95 sched_show_task(p);
97 cancel_freezing(p); 96 cancel_freezing(p);
98 task_unlock(p); 97 task_unlock(p);
99 } while_each_thread(g, p); 98 } while_each_thread(g, p);
@@ -145,7 +144,7 @@ static void thaw_tasks(bool nosig_only)
145 if (nosig_only && should_send_signal(p)) 144 if (nosig_only && should_send_signal(p))
146 continue; 145 continue;
147 146
148 if (cgroup_frozen(p)) 147 if (cgroup_freezing_or_frozen(p))
149 continue; 148 continue;
150 149
151 thaw_process(p); 150 thaw_process(p);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 830cadecbdfc..be861c26dda7 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -26,6 +26,7 @@
26#include <linux/console.h> 26#include <linux/console.h>
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/slab.h>
29 30
30#include <asm/uaccess.h> 31#include <asm/uaccess.h>
31#include <asm/mmu_context.h> 32#include <asm/mmu_context.h>
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 44cce10b582d..56e7dbb8b996 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -15,6 +15,7 @@
15#include <linux/console.h> 15#include <linux/console.h>
16#include <linux/cpu.h> 16#include <linux/cpu.h>
17#include <linux/syscalls.h> 17#include <linux/syscalls.h>
18#include <linux/gfp.h>
18 19
19#include "power.h" 20#include "power.h"
20 21
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 1d575733d4e1..66824d71983a 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -23,6 +23,7 @@
23#include <linux/swap.h> 23#include <linux/swap.h>
24#include <linux/swapops.h> 24#include <linux/swapops.h>
25#include <linux/pm.h> 25#include <linux/pm.h>
26#include <linux/slab.h>
26 27
27#include "power.h" 28#include "power.h"
28 29
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 4d2289626a84..a8c96212bc1b 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -420,7 +420,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
420 * User space encodes device types as two-byte values, 420 * User space encodes device types as two-byte values,
421 * so we need to recode them 421 * so we need to recode them
422 */ 422 */
423 swdev = old_decode_dev(swap_area.dev); 423 swdev = new_decode_dev(swap_area.dev);
424 if (swdev) { 424 if (swdev) {
425 offset = swap_area.offset; 425 offset = swap_area.offset;
426 data->swap = swap_type_of(swdev, offset, NULL); 426 data->swap = swap_type_of(swdev, offset, NULL);
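The one-word change above matters because user space hands in dev_t values in the new 32-bit encoding, while old_decode_dev() only looks at the low 16 bits. The two decoders agree whenever major and minor fit in 8 bits, which is why the bug stayed hidden with common devices. A runnable user-space replica of both decoders:

#include <stdio.h>

/* User-space replicas of the kernel's old_decode_dev()/new_decode_dev(). */
static unsigned old_major(unsigned dev) { return (dev >> 8) & 0xff; }
static unsigned old_minor(unsigned dev) { return dev & 0xff; }
static unsigned new_major(unsigned dev) { return (dev & 0xfff00) >> 8; }
static unsigned new_minor(unsigned dev)
{
        return (dev & 0xff) | ((dev >> 12) & 0xfff00);
}

int main(void)
{
        unsigned small = 0x0803;    /* major 8, minor 3: encodings agree */
        unsigned big   = 0x10082c;  /* major 8, minor 300, new encoding */

        printf("small old %u:%u new %u:%u\n",
               old_major(small), old_minor(small),
               new_major(small), new_minor(small));
        printf("big   old %u:%u new %u:%u\n",     /* old gets minor 44 */
               old_major(big), old_minor(big),
               new_major(big), new_minor(big));
        return 0;
}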
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f1125c1a6321..49d808e833b0 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,6 +45,7 @@
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/module.h> 46#include <linux/module.h>
47#include <linux/kernel_stat.h> 47#include <linux/kernel_stat.h>
48#include <linux/hardirq.h>
48 49
49#ifdef CONFIG_DEBUG_LOCK_ALLOC 50#ifdef CONFIG_DEBUG_LOCK_ALLOC
50static struct lock_class_key rcu_lock_key; 51static struct lock_class_key rcu_lock_key;
@@ -66,6 +67,35 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
66int rcu_scheduler_active __read_mostly; 67int rcu_scheduler_active __read_mostly;
67EXPORT_SYMBOL_GPL(rcu_scheduler_active); 68EXPORT_SYMBOL_GPL(rcu_scheduler_active);
68 69
70#ifdef CONFIG_DEBUG_LOCK_ALLOC
71
72int debug_lockdep_rcu_enabled(void)
73{
74 return rcu_scheduler_active && debug_locks &&
75 current->lockdep_recursion == 0;
76}
77EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
78
79/**
80 * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
81 *
82 * Check for bottom half being disabled, which covers both the
83 * CONFIG_PROVE_RCU and not cases. Note that if someone uses
84 * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
85 * will show the situation.
86 *
87 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
88 */
89int rcu_read_lock_bh_held(void)
90{
91 if (!debug_lockdep_rcu_enabled())
92 return 1;
93 return in_softirq();
94}
95EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
96
97#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
98
69/* 99/*
70 * This function is invoked towards the end of the scheduler's initialization 100 * This function is invoked towards the end of the scheduler's initialization
71 * process. Before this is called, the idle task might contain 101 * process. Before this is called, the idle task might contain
@@ -92,3 +122,14 @@ void wakeme_after_rcu(struct rcu_head *head)
92 rcu = container_of(head, struct rcu_synchronize, head); 122 rcu = container_of(head, struct rcu_synchronize, head);
93 complete(&rcu->completion); 123 complete(&rcu->completion);
94} 124}
125
126#ifdef CONFIG_PROVE_RCU
127/*
128 * wrapper function to avoid #include problems.
129 */
130int rcu_my_thread_group_empty(void)
131{
132 return thread_group_empty(current);
133}
134EXPORT_SYMBOL_GPL(rcu_my_thread_group_empty);
135#endif /* #ifdef CONFIG_PROVE_RCU */
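rcu_read_lock_bh_held() exists so RCU-bh read paths can assert their calling context under CONFIG_PROVE_RCU instead of silently relying on BH being disabled. A sketch of the intended usage pattern; the hlist lookup and struct are hypothetical, not from this patch:

struct entry { int key; struct hlist_node node; };

/* Sketch: an RCU-bh protected lookup asserting its calling context. */
static struct entry *lookup_bh(struct hlist_head *head, int key)
{
        struct entry *e;
        struct hlist_node *n;

        WARN_ON_ONCE(!rcu_read_lock_bh_held());
        hlist_for_each_entry_rcu(e, n, head, node)
                if (e->key == key)
                        return e;
        return NULL;
}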
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index bcdabf37c40b..c7eaa37a768b 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -10,7 +10,6 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/parser.h> 11#include <linux/parser.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13#include <linux/slab.h>
14#include <linux/res_counter.h> 13#include <linux/res_counter.h>
15#include <linux/uaccess.h> 14#include <linux/uaccess.h>
16#include <linux/mm.h> 15#include <linux/mm.h>
diff --git a/kernel/resource.c b/kernel/resource.c
index 2d5be5d9bf5f..9c358e263534 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -219,19 +219,34 @@ void release_child_resources(struct resource *r)
219} 219}
220 220
221/** 221/**
222 * request_resource - request and reserve an I/O or memory resource 222 * request_resource_conflict - request and reserve an I/O or memory resource
223 * @root: root resource descriptor 223 * @root: root resource descriptor
224 * @new: resource descriptor desired by caller 224 * @new: resource descriptor desired by caller
225 * 225 *
226 * Returns 0 for success, negative error code on error. 226 * Returns 0 for success, conflict resource on error.
227 */ 227 */
228int request_resource(struct resource *root, struct resource *new) 228struct resource *request_resource_conflict(struct resource *root, struct resource *new)
229{ 229{
230 struct resource *conflict; 230 struct resource *conflict;
231 231
232 write_lock(&resource_lock); 232 write_lock(&resource_lock);
233 conflict = __request_resource(root, new); 233 conflict = __request_resource(root, new);
234 write_unlock(&resource_lock); 234 write_unlock(&resource_lock);
235 return conflict;
236}
237
238/**
239 * request_resource - request and reserve an I/O or memory resource
240 * @root: root resource descriptor
241 * @new: resource descriptor desired by caller
242 *
243 * Returns 0 for success, negative error code on error.
244 */
245int request_resource(struct resource *root, struct resource *new)
246{
247 struct resource *conflict;
248
249 conflict = request_resource_conflict(root, new);
235 return conflict ? -EBUSY : 0; 250 return conflict ? -EBUSY : 0;
236} 251}
237 252
@@ -474,25 +489,40 @@ static struct resource * __insert_resource(struct resource *parent, struct resou
474} 489}
475 490
476/** 491/**
477 * insert_resource - Inserts a resource in the resource tree 492 * insert_resource_conflict - Inserts resource in the resource tree
478 * @parent: parent of the new resource 493 * @parent: parent of the new resource
479 * @new: new resource to insert 494 * @new: new resource to insert
480 * 495 *
481 * Returns 0 on success, -EBUSY if the resource can't be inserted. 496 * Returns 0 on success, conflict resource if the resource can't be inserted.
482 * 497 *
483 * This function is equivalent to request_resource when no conflict 498 * This function is equivalent to request_resource_conflict when no conflict
484 * happens. If a conflict happens, and the conflicting resources 499 * happens. If a conflict happens, and the conflicting resources
485 * entirely fit within the range of the new resource, then the new 500 * entirely fit within the range of the new resource, then the new
486 * resource is inserted and the conflicting resources become children of 501 * resource is inserted and the conflicting resources become children of
487 * the new resource. 502 * the new resource.
488 */ 503 */
489int insert_resource(struct resource *parent, struct resource *new) 504struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
490{ 505{
491 struct resource *conflict; 506 struct resource *conflict;
492 507
493 write_lock(&resource_lock); 508 write_lock(&resource_lock);
494 conflict = __insert_resource(parent, new); 509 conflict = __insert_resource(parent, new);
495 write_unlock(&resource_lock); 510 write_unlock(&resource_lock);
511 return conflict;
512}
513
514/**
515 * insert_resource - Inserts a resource in the resource tree
516 * @parent: parent of the new resource
517 * @new: new resource to insert
518 *
519 * Returns 0 on success, -EBUSY if the resource can't be inserted.
520 */
521int insert_resource(struct resource *parent, struct resource *new)
522{
523 struct resource *conflict;
524
525 conflict = insert_resource_conflict(parent, new);
496 return conflict ? -EBUSY : 0; 526 return conflict ? -EBUSY : 0;
497} 527}
498 528
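The split gives callers a choice: request_resource()/insert_resource() keep their old 0 / -EBUSY contract, while the new *_conflict variants hand back the conflicting resource so the caller can report it or retry against it. A hedged usage sketch; the window/claim names are made up:

/* Sketch: reserve an MMIO window, naming the conflict on failure. */
static int claim_window(struct resource *root, struct resource *win)
{
        struct resource *conflict = request_resource_conflict(root, win);

        if (conflict) {
                printk(KERN_WARNING "%s clashes with %s\n",
                       win->name, conflict->name);
                return -EBUSY;
        }
        return 0;
}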
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ab3cd7858d3..3c2a54f70ffe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
71#include <linux/debugfs.h> 71#include <linux/debugfs.h>
72#include <linux/ctype.h> 72#include <linux/ctype.h>
73#include <linux/ftrace.h> 73#include <linux/ftrace.h>
74#include <linux/slab.h>
74 75
75#include <asm/tlb.h> 76#include <asm/tlb.h>
76#include <asm/irq_regs.h> 77#include <asm/irq_regs.h>
@@ -322,6 +323,15 @@ static inline struct task_group *task_group(struct task_struct *p)
322/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */ 323/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
323static inline void set_task_rq(struct task_struct *p, unsigned int cpu) 324static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
324{ 325{
326 /*
327 * Strictly speaking this rcu_read_lock() is not needed since the
328 * task_group is tied to the cgroup, which in turn can never go away
329 * as long as there are tasks attached to it.
330 *
331 * However since task_group() uses task_subsys_state() which is an
332 * rcu_dereference() user, this quiets CONFIG_PROVE_RCU.
333 */
334 rcu_read_lock();
325#ifdef CONFIG_FAIR_GROUP_SCHED 335#ifdef CONFIG_FAIR_GROUP_SCHED
326 p->se.cfs_rq = task_group(p)->cfs_rq[cpu]; 336 p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
327 p->se.parent = task_group(p)->se[cpu]; 337 p->se.parent = task_group(p)->se[cpu];
@@ -331,6 +341,7 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
331 p->rt.rt_rq = task_group(p)->rt_rq[cpu]; 341 p->rt.rt_rq = task_group(p)->rt_rq[cpu];
332 p->rt.parent = task_group(p)->rt_se[cpu]; 342 p->rt.parent = task_group(p)->rt_se[cpu];
333#endif 343#endif
344 rcu_read_unlock();
334} 345}
335 346
336#else 347#else
@@ -2650,7 +2661,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2650{ 2661{
2651 unsigned long flags; 2662 unsigned long flags;
2652 struct rq *rq; 2663 struct rq *rq;
2653 int cpu = get_cpu(); 2664 int cpu __maybe_unused = get_cpu();
2654 2665
2655#ifdef CONFIG_SMP 2666#ifdef CONFIG_SMP
2656 /* 2667 /*
@@ -3779,7 +3790,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
3779 * the mutex owner just released it and exited. 3790 * the mutex owner just released it and exited.
3780 */ 3791 */
3781 if (probe_kernel_address(&owner->cpu, cpu)) 3792 if (probe_kernel_address(&owner->cpu, cpu))
3782 goto out; 3793 return 0;
3783#else 3794#else
3784 cpu = owner->cpu; 3795 cpu = owner->cpu;
3785#endif 3796#endif
@@ -3789,14 +3800,14 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
3789 * the cpu field may no longer be valid. 3800 * the cpu field may no longer be valid.
3790 */ 3801 */
3791 if (cpu >= nr_cpumask_bits) 3802 if (cpu >= nr_cpumask_bits)
3792 goto out; 3803 return 0;
3793 3804
3794 /* 3805 /*
3795 * We need to validate that we can do a 3806 * We need to validate that we can do a
3796 * get_cpu() and that we have the percpu area. 3807 * get_cpu() and that we have the percpu area.
3797 */ 3808 */
3798 if (!cpu_online(cpu)) 3809 if (!cpu_online(cpu))
3799 goto out; 3810 return 0;
3800 3811
3801 rq = cpu_rq(cpu); 3812 rq = cpu_rq(cpu);
3802 3813
@@ -3815,7 +3826,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
3815 3826
3816 cpu_relax(); 3827 cpu_relax();
3817 } 3828 }
3818out: 3829
3819 return 1; 3830 return 1;
3820} 3831}
3821#endif 3832#endif
@@ -4902,7 +4913,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4902 int ret; 4913 int ret;
4903 cpumask_var_t mask; 4914 cpumask_var_t mask;
4904 4915
4905 if (len < cpumask_size()) 4916 if ((len * BITS_PER_BYTE) < nr_cpu_ids)
4917 return -EINVAL;
4918 if (len & (sizeof(unsigned long)-1))
4906 return -EINVAL; 4919 return -EINVAL;
4907 4920
4908 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) 4921 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4910,10 +4923,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
4910 4923
4911 ret = sched_getaffinity(pid, mask); 4924 ret = sched_getaffinity(pid, mask);
4912 if (ret == 0) { 4925 if (ret == 0) {
4913 if (copy_to_user(user_mask_ptr, mask, cpumask_size())) 4926 size_t retlen = min_t(size_t, len, cpumask_size());
4927
4928 if (copy_to_user(user_mask_ptr, mask, retlen))
4914 ret = -EFAULT; 4929 ret = -EFAULT;
4915 else 4930 else
4916 ret = cpumask_size(); 4931 ret = retlen;
4917 } 4932 }
4918 free_cpumask_var(mask); 4933 free_cpumask_var(mask);
4919 4934
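The new checks decouple the ABI from cpumask_size(): any word-aligned buffer covering nr_cpu_ids bits is accepted, and at most the kernel's mask size is copied back (the raw syscall returns that byte count; the glibc wrapper still returns 0). A minimal runnable caller, unchanged by the patch:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        cpu_set_t set;          /* 1024 bits, plenty for nr_cpu_ids */

        /* glibc passes sizeof(cpu_set_t) as len; the kernel now only
         * demands len * 8 >= nr_cpu_ids and word alignment. */
        if (sched_getaffinity(0, sizeof(set), &set) == -1) {
                perror("sched_getaffinity");
                return 1;
        }
        printf("%d CPUs in affinity mask\n", CPU_COUNT(&set));
        return 0;
}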
@@ -5383,7 +5398,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
5383 5398
5384 get_task_struct(mt); 5399 get_task_struct(mt);
5385 task_rq_unlock(rq, &flags); 5400 task_rq_unlock(rq, &flags);
5386 wake_up_process(rq->migration_thread); 5401 wake_up_process(mt);
5387 put_task_struct(mt); 5402 put_task_struct(mt);
5388 wait_for_completion(&req.done); 5403 wait_for_completion(&req.done);
5389 tlb_migrate_finish(p->mm); 5404 tlb_migrate_finish(p->mm);
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index fccf9fbb0d7b..e6871cb3fc83 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -27,6 +27,7 @@
27 * of the License. 27 * of the License.
28 */ 28 */
29 29
30#include <linux/gfp.h>
30#include "sched_cpupri.h" 31#include "sched_cpupri.h"
31 32
32/* Convert between a 140 based task->prio, and our 102 based cpupri */ 33/* Convert between a 140 based task->prio, and our 102 based cpupri */
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 67f95aada4b9..19be00ba6123 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -114,7 +114,9 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
114 { 114 {
115 char path[64]; 115 char path[64];
116 116
117 rcu_read_lock();
117 cgroup_path(task_group(p)->css.cgroup, path, sizeof(path)); 118 cgroup_path(task_group(p)->css.cgroup, path, sizeof(path));
119 rcu_read_unlock();
118 SEQ_printf(m, " %s", path); 120 SEQ_printf(m, " %s", path);
119 } 121 }
120#endif 122#endif
@@ -518,8 +520,4 @@ void proc_sched_set_task(struct task_struct *p)
518 p->se.nr_wakeups_idle = 0; 520 p->se.nr_wakeups_idle = 0;
519 p->sched_info.bkl_count = 0; 521 p->sched_info.bkl_count = 0;
520#endif 522#endif
521 p->se.sum_exec_runtime = 0;
522 p->se.prev_sum_exec_runtime = 0;
523 p->nvcsw = 0;
524 p->nivcsw = 0;
525} 523}
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 7494bbf5a270..7d3f4fa9ef4f 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -637,7 +637,7 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
637 goto cancelled; 637 goto cancelled;
638 638
639 /* the timer holds a reference whilst it is pending */ 639 /* the timer holds a reference whilst it is pending */
640 ret = work->ops->get_ref(work); 640 ret = slow_work_get_ref(work);
641 if (ret < 0) 641 if (ret < 0)
642 goto cant_get_ref; 642 goto cant_get_ref;
643 643
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
index 321f3c59d732..a29ebd1ef41d 100644
--- a/kernel/slow-work.h
+++ b/kernel/slow-work.h
@@ -43,28 +43,28 @@ extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
43 */ 43 */
44static inline void slow_work_set_thread_pid(int id, pid_t pid) 44static inline void slow_work_set_thread_pid(int id, pid_t pid)
45{ 45{
46#ifdef CONFIG_SLOW_WORK_PROC 46#ifdef CONFIG_SLOW_WORK_DEBUG
47 slow_work_pids[id] = pid; 47 slow_work_pids[id] = pid;
48#endif 48#endif
49} 49}
50 50
51static inline void slow_work_mark_time(struct slow_work *work) 51static inline void slow_work_mark_time(struct slow_work *work)
52{ 52{
53#ifdef CONFIG_SLOW_WORK_PROC 53#ifdef CONFIG_SLOW_WORK_DEBUG
54 work->mark = CURRENT_TIME; 54 work->mark = CURRENT_TIME;
55#endif 55#endif
56} 56}
57 57
58static inline void slow_work_begin_exec(int id, struct slow_work *work) 58static inline void slow_work_begin_exec(int id, struct slow_work *work)
59{ 59{
60#ifdef CONFIG_SLOW_WORK_PROC 60#ifdef CONFIG_SLOW_WORK_DEBUG
61 slow_work_execs[id] = work; 61 slow_work_execs[id] = work;
62#endif 62#endif
63} 63}
64 64
65static inline void slow_work_end_exec(int id, struct slow_work *work) 65static inline void slow_work_end_exec(int id, struct slow_work *work)
66{ 66{
67#ifdef CONFIG_SLOW_WORK_PROC 67#ifdef CONFIG_SLOW_WORK_DEBUG
68 write_lock(&slow_work_execs_lock); 68 write_lock(&slow_work_execs_lock);
69 slow_work_execs[id] = NULL; 69 slow_work_execs[id] = NULL;
70 write_unlock(&slow_work_execs_lock); 70 write_unlock(&slow_work_execs_lock);
diff --git a/kernel/smp.c b/kernel/smp.c
index 9867b6bfefce..3fc697336183 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/percpu.h> 10#include <linux/percpu.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/gfp.h>
12#include <linux/smp.h> 13#include <linux/smp.h>
13#include <linux/cpu.h> 14#include <linux/cpu.h>
14 15
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 0d4c7898ab80..4b493f67dcb5 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -155,11 +155,11 @@ void softlockup_tick(void)
155 * Wake up the high-prio watchdog task twice per 155 * Wake up the high-prio watchdog task twice per
156 * threshold timespan. 156 * threshold timespan.
157 */ 157 */
158 if (now > touch_ts + softlockup_thresh/2) 158 if (time_after(now - softlockup_thresh/2, touch_ts))
159 wake_up_process(per_cpu(softlockup_watchdog, this_cpu)); 159 wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
160 160
161 /* Warn about unreasonable delays: */ 161 /* Warn about unreasonable delays: */
162 if (now <= (touch_ts + softlockup_thresh)) 162 if (time_before_eq(now - softlockup_thresh, touch_ts))
163 return; 163 return;
164 164
165 per_cpu(softlockup_print_ts, this_cpu) = touch_ts; 165 per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
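time_after()/time_before_eq() turn these comparisons into wraparound-safe signed subtractions; note that the threshold also moved to the other side, so the sum touch_ts + softlockup_thresh can no longer overflow either. A runnable demonstration of the failure mode the naive compare has near the wrap point:

#include <stdio.h>

/* User-space replica of the kernel's time_after(a, b): true if a is
 * after b, correct across unsigned wraparound. */
#define time_after(a, b)        ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long touch  = (unsigned long)-100; /* just before wrap */
        unsigned long now    = 10;  /* 110 ticks later, counter wrapped */
        unsigned long thresh = 12;

        printf("naive:      %d\n", now > touch + thresh);  /* 0: missed */
        printf("time_after: %d\n",
               time_after(now - thresh, touch));           /* 1: fires */
        return 0;
}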
diff --git a/kernel/srcu.c b/kernel/srcu.c
index bde4295774c8..2980da3fd509 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -30,7 +30,6 @@
30#include <linux/preempt.h> 30#include <linux/preempt.h>
31#include <linux/rcupdate.h> 31#include <linux/rcupdate.h>
32#include <linux/sched.h> 32#include <linux/sched.h>
33#include <linux/slab.h>
34#include <linux/smp.h> 33#include <linux/smp.h>
35#include <linux/srcu.h> 34#include <linux/srcu.h>
36 35
diff --git a/kernel/sys.c b/kernel/sys.c
index 8298878f4f71..7cb426a58965 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -36,6 +36,7 @@
36#include <linux/personality.h> 36#include <linux/personality.h>
37#include <linux/ptrace.h> 37#include <linux/ptrace.h>
38#include <linux/fs_struct.h> 38#include <linux/fs_struct.h>
39#include <linux/gfp.h>
39 40
40#include <linux/compat.h> 41#include <linux/compat.h>
41#include <linux/syscalls.h> 42#include <linux/syscalls.h>
@@ -1117,7 +1118,7 @@ DECLARE_RWSEM(uts_sem);
1117 1118
1118#ifdef COMPAT_UTS_MACHINE 1119#ifdef COMPAT_UTS_MACHINE
1119#define override_architecture(name) \ 1120#define override_architecture(name) \
1120 (current->personality == PER_LINUX32 && \ 1121 (personality(current->personality) == PER_LINUX32 && \
1121 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \ 1122 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1122 sizeof(COMPAT_UTS_MACHINE))) 1123 sizeof(COMPAT_UTS_MACHINE)))
1123#else 1124#else
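current->personality mixes the base personality in the low byte with flag bits above it (ADDR_NO_RANDOMIZE and friends), so a raw equality test against PER_LINUX32 fails as soon as any flag is set; personality() masks the flags off first. A runnable illustration using the values from include/linux/personality.h:

#include <stdio.h>

#define PER_MASK           0x00ffu
#define PER_LINUX32        0x0008u
#define ADDR_NO_RANDOMIZE  0x0040000u   /* one of the personality flags */

#define personality(pers)  ((pers) & PER_MASK)

int main(void)
{
        unsigned int pers = PER_LINUX32 | ADDR_NO_RANDOMIZE;

        printf("raw compare:    %d\n", pers == PER_LINUX32);  /* 0 */
        printf("masked compare: %d\n",
               personality(pers) == PER_LINUX32);             /* 1 */
        return 0;
}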
diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
index 8cd50d8f9bde..59030570f5ca 100644
--- a/kernel/sysctl_binary.c
+++ b/kernel/sysctl_binary.c
@@ -13,6 +13,7 @@
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/ctype.h> 14#include <linux/ctype.h>
15#include <linux/netdevice.h> 15#include <linux/netdevice.h>
16#include <linux/slab.h>
16 17
17#ifdef CONFIG_SYSCTL_SYSCALL 18#ifdef CONFIG_SYSCTL_SYSCALL
18 19
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 899ca51be5e8..11281d5792bd 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -22,6 +22,7 @@
22#include <linux/delayacct.h> 22#include <linux/delayacct.h>
23#include <linux/cpumask.h> 23#include <linux/cpumask.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25#include <linux/slab.h>
25#include <linux/cgroupstats.h> 26#include <linux/cgroupstats.h>
26#include <linux/cgroup.h> 27#include <linux/cgroup.h>
27#include <linux/fs.h> 28#include <linux/fs.h>
diff --git a/kernel/time.c b/kernel/time.c
index 2358a3646a63..50612faa9baf 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -35,7 +35,6 @@
35#include <linux/syscalls.h> 35#include <linux/syscalls.h>
36#include <linux/security.h> 36#include <linux/security.h>
37#include <linux/fs.h> 37#include <linux/fs.h>
38#include <linux/slab.h>
39#include <linux/math64.h> 38#include <linux/math64.h>
40#include <linux/ptrace.h> 39#include <linux/ptrace.h>
41 40
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0a8a213016f0..aada0e52680a 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -22,6 +22,29 @@
22 22
23#include "tick-internal.h" 23#include "tick-internal.h"
24 24
25/* Limit min_delta to a jiffie */
26#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
27
28static int tick_increase_min_delta(struct clock_event_device *dev)
29{
30 /* Nothing to do if we already reached the limit */
31 if (dev->min_delta_ns >= MIN_DELTA_LIMIT)
32 return -ETIME;
33
34 if (dev->min_delta_ns < 5000)
35 dev->min_delta_ns = 5000;
36 else
37 dev->min_delta_ns += dev->min_delta_ns >> 1;
38
39 if (dev->min_delta_ns > MIN_DELTA_LIMIT)
40 dev->min_delta_ns = MIN_DELTA_LIMIT;
41
42 printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
43 dev->name ? dev->name : "?",
44 (unsigned long long) dev->min_delta_ns);
45 return 0;
46}
47
25/** 48/**
26 * tick_program_event internal worker function 49 * tick_program_event internal worker function
27 */ 50 */
@@ -37,23 +60,28 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
37 if (!ret || !force) 60 if (!ret || !force)
38 return ret; 61 return ret;
39 62
63 dev->retries++;
40 /* 64 /*
41 * We tried 2 times to program the device with the given 65 * We tried 3 times to program the device with the given
42 * min_delta_ns. If that's not working then we double it 66 * min_delta_ns. If that's not working then we increase it
43 * and emit a warning. 67 * and emit a warning.
44 */ 68 */
45 if (++i > 2) { 69 if (++i > 2) {
46 /* Increase the min. delta and try again */ 70 /* Increase the min. delta and try again */
47 if (!dev->min_delta_ns) 71 if (tick_increase_min_delta(dev)) {
48 dev->min_delta_ns = 5000; 72 /*
49 else 73 * Get out of the loop if min_delta_ns
50 dev->min_delta_ns += dev->min_delta_ns >> 1; 74 * hit the limit already. That's
51 75 * better than staying here forever.
52 printk(KERN_WARNING 76 *
53 "CE: %s increasing min_delta_ns to %llu nsec\n", 77 * We clear next_event so we have a
54 dev->name ? dev->name : "?", 78 * chance that the box survives.
55 (unsigned long long) dev->min_delta_ns << 1); 79 */
56 80 printk(KERN_WARNING
81 "CE: Reprogramming failure. Giving up\n");
82 dev->next_event.tv64 = KTIME_MAX;
83 return -ETIME;
84 }
57 i = 0; 85 i = 0;
58 } 86 }
59 87
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
index 12f5c55090be..ac38fbb176cc 100644
--- a/kernel/time/timecompare.c
+++ b/kernel/time/timecompare.c
@@ -19,6 +19,7 @@
19 19
20#include <linux/timecompare.h> 20#include <linux/timecompare.h>
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/slab.h>
22#include <linux/math64.h> 23#include <linux/math64.h>
23 24
24/* 25/*
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 1137f245a4ba..caf8d4d4f5c8 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -806,7 +806,8 @@ void update_wall_time(void)
806 shift = min(shift, maxshift); 806 shift = min(shift, maxshift);
807 while (offset >= timekeeper.cycle_interval) { 807 while (offset >= timekeeper.cycle_interval) {
808 offset = logarithmic_accumulation(offset, shift); 808 offset = logarithmic_accumulation(offset, shift);
809 shift--; 809 if(offset < timekeeper.cycle_interval<<shift)
810 shift--;
810 } 811 }
811 812
812 /* correct the clock when NTP error is too big */ 813 /* correct the clock when NTP error is too big */
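Unconditionally decrementing shift let it decay to zero while a large backlog remained, leaving the loop to chew through the backlog one interval at a time (the long-delayed-timer hang). With the guard, shift only shrinks once a full chunk at the current size no longer fits. A runnable toy trace of the loop; interval and backlog are illustrative, and logarithmic_accumulation() is reduced to its guarded subtraction:

#include <stdio.h>

int main(void)
{
        unsigned long long interval = 100;      /* toy cycle_interval */
        unsigned long long offset = 100000;     /* long-delayed backlog */
        int shift = 9;                          /* start with 2^9 chunks */

        while (offset >= interval) {
                /* logarithmic_accumulation(): consume a chunk if it fits */
                if (offset >= interval << shift)
                        offset -= interval << shift;
                /* the fix: only decay shift when a chunk no longer fits */
                if (offset < interval << shift)
                        shift--;
                printf("shift=%d offset=%llu\n", shift, offset);
        }
        return 0;
}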
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index bdfb8dd1050c..1a4a7dd78777 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -228,6 +228,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
228 SEQ_printf(m, " event_handler: "); 228 SEQ_printf(m, " event_handler: ");
229 print_name_offset(m, dev->event_handler); 229 print_name_offset(m, dev->event_handler);
230 SEQ_printf(m, "\n"); 230 SEQ_printf(m, "\n");
231 SEQ_printf(m, " retries: %lu\n", dev->retries);
231} 232}
232 233
233static void timer_list_show_tickdevices(struct seq_file *m) 234static void timer_list_show_tickdevices(struct seq_file *m)
@@ -257,7 +258,7 @@ static int timer_list_show(struct seq_file *m, void *v)
257 u64 now = ktime_to_ns(ktime_get()); 258 u64 now = ktime_to_ns(ktime_get());
258 int cpu; 259 int cpu;
259 260
260 SEQ_printf(m, "Timer List Version: v0.5\n"); 261 SEQ_printf(m, "Timer List Version: v0.6\n");
261 SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); 262 SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
262 SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); 263 SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
263 264
diff --git a/kernel/timer.c b/kernel/timer.c
index 49773f38c9bc..9199f3c52215 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -39,6 +39,7 @@
39#include <linux/kallsyms.h> 39#include <linux/kallsyms.h>
40#include <linux/perf_event.h> 40#include <linux/perf_event.h>
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/slab.h>
42 43
43#include <asm/uaccess.h> 44#include <asm/uaccess.h>
44#include <asm/unistd.h> 45#include <asm/unistd.h>
@@ -936,6 +937,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
936 if (base->running_timer == timer) 937 if (base->running_timer == timer)
937 goto out; 938 goto out;
938 939
940 timer_stats_timer_clear_start_info(timer);
939 ret = 0; 941 ret = 0;
940 if (timer_pending(timer)) { 942 if (timer_pending(timer)) {
941 detach_timer(timer, 1); 943 detach_timer(timer, 1);
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index d00c6fe23f54..78edc6490038 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events.o
52obj-$(CONFIG_EVENT_TRACING) += trace_export.o 52obj-$(CONFIG_EVENT_TRACING) += trace_export.o
53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o 53obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o
54ifeq ($(CONFIG_PERF_EVENTS),y) 54ifeq ($(CONFIG_PERF_EVENTS),y)
55obj-$(CONFIG_EVENT_TRACING) += trace_event_profile.o 55obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
56endif 56endif
57obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o 57obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
58obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o 58obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 07f945a99430..b3bc91a3f510 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -21,6 +21,7 @@
21#include <linux/percpu.h> 21#include <linux/percpu.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/slab.h>
24#include <linux/debugfs.h> 25#include <linux/debugfs.h>
25#include <linux/smp_lock.h> 26#include <linux/smp_lock.h>
26#include <linux/time.h> 27#include <linux/time.h>
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d9062f5cc0c0..2404b59b3097 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -24,6 +24,7 @@
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <linux/ftrace.h> 25#include <linux/ftrace.h>
26#include <linux/sysctl.h> 26#include <linux/sysctl.h>
27#include <linux/slab.h>
27#include <linux/ctype.h> 28#include <linux/ctype.h>
28#include <linux/list.h> 29#include <linux/list.h>
29#include <linux/hash.h> 30#include <linux/hash.h>
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 9f4f565b01e6..a22582a06161 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -9,7 +9,6 @@
9#include <linux/workqueue.h> 9#include <linux/workqueue.h>
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/slab.h>
13 12
14#define CREATE_TRACE_POINTS 13#define CREATE_TRACE_POINTS
15#include <trace/events/power.h> 14#include <trace/events/power.h>
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05a9f83b8819..41ca394feb22 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/percpu.h> 15#include <linux/percpu.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/slab.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/hash.h> 19#include <linux/hash.h>
19#include <linux/list.h> 20#include <linux/list.h>
@@ -207,6 +208,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
207#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX) 208#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
208#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */ 209#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
209 210
211#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
212# define RB_FORCE_8BYTE_ALIGNMENT 0
213# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
214#else
215# define RB_FORCE_8BYTE_ALIGNMENT 1
216# define RB_ARCH_ALIGNMENT 8U
217#endif
218
210/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */ 219/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
211#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX 220#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
212 221
@@ -1201,18 +1210,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
1201 1210
1202 for (i = 0; i < nr_pages; i++) { 1211 for (i = 0; i < nr_pages; i++) {
1203 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) 1212 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1204 return; 1213 goto out;
1205 p = cpu_buffer->pages->next; 1214 p = cpu_buffer->pages->next;
1206 bpage = list_entry(p, struct buffer_page, list); 1215 bpage = list_entry(p, struct buffer_page, list);
1207 list_del_init(&bpage->list); 1216 list_del_init(&bpage->list);
1208 free_buffer_page(bpage); 1217 free_buffer_page(bpage);
1209 } 1218 }
1210 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages))) 1219 if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
1211 return; 1220 goto out;
1212 1221
1213 rb_reset_cpu(cpu_buffer); 1222 rb_reset_cpu(cpu_buffer);
1214 rb_check_pages(cpu_buffer); 1223 rb_check_pages(cpu_buffer);
1215 1224
1225out:
1216 spin_unlock_irq(&cpu_buffer->reader_lock); 1226 spin_unlock_irq(&cpu_buffer->reader_lock);
1217} 1227}
1218 1228
@@ -1229,7 +1239,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1229 1239
1230 for (i = 0; i < nr_pages; i++) { 1240 for (i = 0; i < nr_pages; i++) {
1231 if (RB_WARN_ON(cpu_buffer, list_empty(pages))) 1241 if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
1232 return; 1242 goto out;
1233 p = pages->next; 1243 p = pages->next;
1234 bpage = list_entry(p, struct buffer_page, list); 1244 bpage = list_entry(p, struct buffer_page, list);
1235 list_del_init(&bpage->list); 1245 list_del_init(&bpage->list);
@@ -1238,6 +1248,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
1238 rb_reset_cpu(cpu_buffer); 1248 rb_reset_cpu(cpu_buffer);
1239 rb_check_pages(cpu_buffer); 1249 rb_check_pages(cpu_buffer);
1240 1250
1251out:
1241 spin_unlock_irq(&cpu_buffer->reader_lock); 1252 spin_unlock_irq(&cpu_buffer->reader_lock);
1242} 1253}
1243 1254
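The two hunks above fix a lock leak rather than a logic change: rb_remove_pages() and rb_insert_pages() take cpu_buffer->reader_lock with spin_lock_irq() before their loops, so the old RB_WARN_ON() paths returned with the lock still held and interrupts still disabled. Routing every early exit through a single out: label guarantees the spin_unlock_irq() runs. A minimal userspace sketch of the same unlock-on-all-paths pattern, with a pthread mutex standing in for the reader_lock (illustrative names, not the ring-buffer API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_items = 3;

static void remove_items(int nr)
{
	pthread_mutex_lock(&lock);

	for (int i = 0; i < nr; i++) {
		if (nr_items == 0) {
			fprintf(stderr, "warn: list empty\n");
			goto out;	/* a bare "return" here would leak the lock */
		}
		nr_items--;
	}

out:
	pthread_mutex_unlock(&lock);	/* runs on success and error paths alike */
}

int main(void)
{
	remove_items(5);	/* hits the early-exit path */
	remove_items(1);	/* still works because the lock was released */
	return 0;
}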
@@ -1547,7 +1558,7 @@ rb_update_event(struct ring_buffer_event *event,
1547 1558
1548 case 0: 1559 case 0:
1549 length -= RB_EVNT_HDR_SIZE; 1560 length -= RB_EVNT_HDR_SIZE;
1550 if (length > RB_MAX_SMALL_DATA) 1561 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
1551 event->array[0] = length; 1562 event->array[0] = length;
1552 else 1563 else
1553 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT); 1564 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1722,11 +1733,11 @@ static unsigned rb_calculate_event_length(unsigned length)
1722 if (!length) 1733 if (!length)
1723 length = 1; 1734 length = 1;
1724 1735
1725 if (length > RB_MAX_SMALL_DATA) 1736 if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
1726 length += sizeof(event.array[0]); 1737 length += sizeof(event.array[0]);
1727 1738
1728 length += RB_EVNT_HDR_SIZE; 1739 length += RB_EVNT_HDR_SIZE;
1729 length = ALIGN(length, RB_ALIGNMENT); 1740 length = ALIGN(length, RB_ARCH_ALIGNMENT);
1730 1741
1731 return length; 1742 return length;
1732} 1743}
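The alignment hunks change how an event's on-buffer size is rounded. On 64-bit architectures without efficient unaligned access, RB_FORCE_8BYTE_ALIGNMENT makes every event carry its length in array[0] instead of packing small lengths into type_len, and RB_ARCH_ALIGNMENT rounds the total to 8 bytes, so 8-byte payloads never straddle an alignment boundary. A compile-and-run sketch of the new rb_calculate_event_length() arithmetic with the macros inlined as constants (the 4-byte header size and the small-data cutoff are assumptions for illustration):

#include <stdio.h>

#define RB_ALIGNMENT		4U
#define RB_EVNT_HDR_SIZE	4U			/* assumed: one u32 type_len/time_delta word */
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * 28)	/* illustrative cutoff */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

/* Pretend we are on 64-bit without efficient unaligned access: */
#define RB_FORCE_8BYTE_ALIGNMENT	1
#define RB_ARCH_ALIGNMENT		8U

static unsigned int calc_event_length(unsigned int length)
{
	if (!length)
		length = 1;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(unsigned int);		/* room for event.array[0] */
	length += RB_EVNT_HDR_SIZE;
	return ALIGN(length, RB_ARCH_ALIGNMENT);	/* was ALIGN(..., RB_ALIGNMENT) */
}

int main(void)
{
	/* 10 payload bytes: 10 + 4 (array[0]) + 4 (header) = 18, rounded up to 24 */
	printf("%u\n", calc_event_length(10));
	return 0;
}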
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3ec2ee6f6560..44f916a04065 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -33,10 +33,10 @@
33#include <linux/kdebug.h> 33#include <linux/kdebug.h>
34#include <linux/string.h> 34#include <linux/string.h>
35#include <linux/rwsem.h> 35#include <linux/rwsem.h>
36#include <linux/slab.h>
36#include <linux/ctype.h> 37#include <linux/ctype.h>
37#include <linux/init.h> 38#include <linux/init.h>
38#include <linux/poll.h> 39#include <linux/poll.h>
39#include <linux/gfp.h>
40#include <linux/fs.h> 40#include <linux/fs.h>
41 41
42#include "trace.h" 42#include "trace.h"
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f417b9..9d589d8dcd1a 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
84 int this_cpu; 84 int this_cpu;
85 u64 now; 85 u64 now;
86 86
87 raw_local_irq_save(flags); 87 local_irq_save(flags);
88 88
89 this_cpu = raw_smp_processor_id(); 89 this_cpu = raw_smp_processor_id();
90 now = cpu_clock(this_cpu); 90 now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
110 arch_spin_unlock(&trace_clock_struct.lock); 110 arch_spin_unlock(&trace_clock_struct.lock);
111 111
112 out: 112 out:
113 raw_local_irq_restore(flags); 113 local_irq_restore(flags);
114 114
115 return now; 115 return now;
116} 116}
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_perf.c
index c1cc3ab633de..0565bb42566f 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_perf.c
@@ -1,32 +1,41 @@
1/* 1/*
2 * trace event based perf counter profiling 2 * trace event based perf event profiling/tracing
3 * 3 *
4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com> 4 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
5 * 5 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
6 */ 6 */
7 7
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/kprobes.h> 9#include <linux/kprobes.h>
10#include "trace.h" 10#include "trace.h"
11 11
12DEFINE_PER_CPU(struct pt_regs, perf_trace_regs);
13EXPORT_PER_CPU_SYMBOL_GPL(perf_trace_regs);
14
15EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
12 16
13static char *perf_trace_buf; 17static char *perf_trace_buf;
14static char *perf_trace_buf_nmi; 18static char *perf_trace_buf_nmi;
15 19
16typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ; 20/*
 21 * Force it to be aligned to unsigned long to avoid misaligned access
 22 * surprises
23 */
24typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
25 perf_trace_t;
17 26
18/* Count the events in use (per event id, not per instance) */ 27/* Count the events in use (per event id, not per instance) */
19static int total_profile_count; 28static int total_ref_count;
20 29
21static int ftrace_profile_enable_event(struct ftrace_event_call *event) 30static int perf_trace_event_enable(struct ftrace_event_call *event)
22{ 31{
23 char *buf; 32 char *buf;
24 int ret = -ENOMEM; 33 int ret = -ENOMEM;
25 34
26 if (event->profile_count++ > 0) 35 if (event->perf_refcount++ > 0)
27 return 0; 36 return 0;
28 37
29 if (!total_profile_count) { 38 if (!total_ref_count) {
30 buf = (char *)alloc_percpu(perf_trace_t); 39 buf = (char *)alloc_percpu(perf_trace_t);
31 if (!buf) 40 if (!buf)
32 goto fail_buf; 41 goto fail_buf;
@@ -40,35 +49,35 @@ static int ftrace_profile_enable_event(struct ftrace_event_call *event)
40 rcu_assign_pointer(perf_trace_buf_nmi, buf); 49 rcu_assign_pointer(perf_trace_buf_nmi, buf);
41 } 50 }
42 51
43 ret = event->profile_enable(event); 52 ret = event->perf_event_enable(event);
44 if (!ret) { 53 if (!ret) {
45 total_profile_count++; 54 total_ref_count++;
46 return 0; 55 return 0;
47 } 56 }
48 57
49fail_buf_nmi: 58fail_buf_nmi:
50 if (!total_profile_count) { 59 if (!total_ref_count) {
51 free_percpu(perf_trace_buf_nmi); 60 free_percpu(perf_trace_buf_nmi);
52 free_percpu(perf_trace_buf); 61 free_percpu(perf_trace_buf);
53 perf_trace_buf_nmi = NULL; 62 perf_trace_buf_nmi = NULL;
54 perf_trace_buf = NULL; 63 perf_trace_buf = NULL;
55 } 64 }
56fail_buf: 65fail_buf:
57 event->profile_count--; 66 event->perf_refcount--;
58 67
59 return ret; 68 return ret;
60} 69}
61 70
62int ftrace_profile_enable(int event_id) 71int perf_trace_enable(int event_id)
63{ 72{
64 struct ftrace_event_call *event; 73 struct ftrace_event_call *event;
65 int ret = -EINVAL; 74 int ret = -EINVAL;
66 75
67 mutex_lock(&event_mutex); 76 mutex_lock(&event_mutex);
68 list_for_each_entry(event, &ftrace_events, list) { 77 list_for_each_entry(event, &ftrace_events, list) {
69 if (event->id == event_id && event->profile_enable && 78 if (event->id == event_id && event->perf_event_enable &&
70 try_module_get(event->mod)) { 79 try_module_get(event->mod)) {
71 ret = ftrace_profile_enable_event(event); 80 ret = perf_trace_event_enable(event);
72 break; 81 break;
73 } 82 }
74 } 83 }
@@ -77,16 +86,16 @@ int ftrace_profile_enable(int event_id)
77 return ret; 86 return ret;
78} 87}
79 88
80static void ftrace_profile_disable_event(struct ftrace_event_call *event) 89static void perf_trace_event_disable(struct ftrace_event_call *event)
81{ 90{
82 char *buf, *nmi_buf; 91 char *buf, *nmi_buf;
83 92
84 if (--event->profile_count > 0) 93 if (--event->perf_refcount > 0)
85 return; 94 return;
86 95
87 event->profile_disable(event); 96 event->perf_event_disable(event);
88 97
89 if (!--total_profile_count) { 98 if (!--total_ref_count) {
90 buf = perf_trace_buf; 99 buf = perf_trace_buf;
91 rcu_assign_pointer(perf_trace_buf, NULL); 100 rcu_assign_pointer(perf_trace_buf, NULL);
92 101
@@ -104,14 +113,14 @@ static void ftrace_profile_disable_event(struct ftrace_event_call *event)
104 } 113 }
105} 114}
106 115
107void ftrace_profile_disable(int event_id) 116void perf_trace_disable(int event_id)
108{ 117{
109 struct ftrace_event_call *event; 118 struct ftrace_event_call *event;
110 119
111 mutex_lock(&event_mutex); 120 mutex_lock(&event_mutex);
112 list_for_each_entry(event, &ftrace_events, list) { 121 list_for_each_entry(event, &ftrace_events, list) {
113 if (event->id == event_id) { 122 if (event->id == event_id) {
114 ftrace_profile_disable_event(event); 123 perf_trace_event_disable(event);
115 module_put(event->mod); 124 module_put(event->mod);
116 break; 125 break;
117 } 126 }
@@ -119,13 +128,15 @@ void ftrace_profile_disable(int event_id)
119 mutex_unlock(&event_mutex); 128 mutex_unlock(&event_mutex);
120} 129}
121 130
122__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type, 131__kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
123 int *rctxp, unsigned long *irq_flags) 132 int *rctxp, unsigned long *irq_flags)
124{ 133{
125 struct trace_entry *entry; 134 struct trace_entry *entry;
126 char *trace_buf, *raw_data; 135 char *trace_buf, *raw_data;
127 int pc, cpu; 136 int pc, cpu;
128 137
138 BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
139
129 pc = preempt_count(); 140 pc = preempt_count();
130 141
131 /* Protect the per cpu buffer, begin the rcu read side */ 142 /* Protect the per cpu buffer, begin the rcu read side */
@@ -148,7 +159,7 @@ __kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
148 raw_data = per_cpu_ptr(trace_buf, cpu); 159 raw_data = per_cpu_ptr(trace_buf, cpu);
149 160
150 /* zero the dead bytes from align to not leak stack to user */ 161 /* zero the dead bytes from align to not leak stack to user */
151 *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL; 162 memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
152 163
153 entry = (struct trace_entry *)raw_data; 164 entry = (struct trace_entry *)raw_data;
154 tracing_generic_entry_update(entry, *irq_flags, pc); 165 tracing_generic_entry_update(entry, *irq_flags, pc);
@@ -161,4 +172,4 @@ err_recursion:
161 local_irq_restore(*irq_flags); 172 local_irq_restore(*irq_flags);
162 return NULL; 173 return NULL;
163} 174}
164EXPORT_SYMBOL_GPL(ftrace_perf_buf_prepare); 175EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
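Beyond the profile_* to perf_* renames, this file keeps one resource-management idiom worth noting: a per-event refcount (perf_refcount) decides when to call the event's own enable/disable hooks, while the global total_ref_count decides when the shared per-CPU buffers (one regular, one for NMI context) are allocated and freed, so any number of events share the same two buffers. A stripped-down userspace sketch of that two-level refcount pattern, with plain malloc in place of alloc_percpu and illustrative names:

#include <stdlib.h>

static char *shared_buf;		/* stands in for perf_trace_buf */
static int total_ref_count;

struct event {
	int refcount;			/* stands in for perf_refcount */
};

static int event_enable(struct event *ev)
{
	if (ev->refcount++ > 0)
		return 0;		/* already enabled; just count the user */

	if (!total_ref_count) {
		shared_buf = malloc(4096);	/* first user overall allocates */
		if (!shared_buf) {
			ev->refcount--;
			return -1;
		}
	}
	total_ref_count++;
	return 0;
}

static void event_disable(struct event *ev)
{
	if (--ev->refcount > 0)
		return;			/* this event still has users */

	if (!--total_ref_count) {	/* last event overall frees the buffer */
		free(shared_buf);
		shared_buf = NULL;
	}
}

int main(void)
{
	struct event a = { 0 }, b = { 0 };

	event_enable(&a);
	event_enable(&b);	/* shares the buffer; no second allocation */
	event_disable(&a);
	event_disable(&b);	/* last disable frees the buffer */
	return 0;
}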
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 3f972ad98d04..c697c7043349 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -15,6 +15,7 @@
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/ctype.h> 17#include <linux/ctype.h>
18#include <linux/slab.h>
18#include <linux/delay.h> 19#include <linux/delay.h>
19 20
20#include <asm/setup.h> 21#include <asm/setup.h>
@@ -938,7 +939,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
938 trace_create_file("enable", 0644, call->dir, call, 939 trace_create_file("enable", 0644, call->dir, call,
939 enable); 940 enable);
940 941
941 if (call->id && call->profile_enable) 942 if (call->id && call->perf_event_enable)
942 trace_create_file("id", 0444, call->dir, call, 943 trace_create_file("id", 0444, call->dir, call,
943 id); 944 id);
944 945
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 4615f62a04f1..88c0b6dbd7fe 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -22,6 +22,7 @@
22#include <linux/ctype.h> 22#include <linux/ctype.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/perf_event.h> 24#include <linux/perf_event.h>
25#include <linux/slab.h>
25 26
26#include "trace.h" 27#include "trace.h"
27#include "trace_output.h" 28#include "trace_output.h"
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index e6989d9b44da..9aed1a5cf553 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -9,6 +9,7 @@
9#include <linux/debugfs.h> 9#include <linux/debugfs.h>
10#include <linux/uaccess.h> 10#include <linux/uaccess.h>
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <linux/slab.h>
12#include <linux/fs.h> 13#include <linux/fs.h>
13 14
14#include "trace.h" 15#include "trace.h"
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 505c92273b1a..1251e367bae9 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1214,7 +1214,7 @@ static int set_print_fmt(struct trace_probe *tp)
1214#ifdef CONFIG_PERF_EVENTS 1214#ifdef CONFIG_PERF_EVENTS
1215 1215
1216/* Kprobe profile handler */ 1216/* Kprobe profile handler */
1217static __kprobes void kprobe_profile_func(struct kprobe *kp, 1217static __kprobes void kprobe_perf_func(struct kprobe *kp,
1218 struct pt_regs *regs) 1218 struct pt_regs *regs)
1219{ 1219{
1220 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp); 1220 struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1227,11 +1227,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
1227 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); 1227 __size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
1228 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1228 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1229 size -= sizeof(u32); 1229 size -= sizeof(u32);
1230 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1230 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1231 "profile buffer not large enough")) 1231 "profile buffer not large enough"))
1232 return; 1232 return;
1233 1233
1234 entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); 1234 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1235 if (!entry) 1235 if (!entry)
1236 return; 1236 return;
1237 1237
@@ -1240,11 +1240,11 @@ static __kprobes void kprobe_profile_func(struct kprobe *kp,
1240 for (i = 0; i < tp->nr_args; i++) 1240 for (i = 0; i < tp->nr_args; i++)
1241 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1241 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1242 1242
1243 ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags); 1243 perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
1244} 1244}
1245 1245
1246/* Kretprobe profile handler */ 1246/* Kretprobe profile handler */
1247static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri, 1247static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
1248 struct pt_regs *regs) 1248 struct pt_regs *regs)
1249{ 1249{
1250 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); 1250 struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1257,11 +1257,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
1257 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); 1257 __size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
1258 size = ALIGN(__size + sizeof(u32), sizeof(u64)); 1258 size = ALIGN(__size + sizeof(u32), sizeof(u64));
1259 size -= sizeof(u32); 1259 size -= sizeof(u32);
1260 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 1260 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
1261 "profile buffer not large enough")) 1261 "profile buffer not large enough"))
1262 return; 1262 return;
1263 1263
1264 entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags); 1264 entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
1265 if (!entry) 1265 if (!entry)
1266 return; 1266 return;
1267 1267
@@ -1271,10 +1271,11 @@ static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
1271 for (i = 0; i < tp->nr_args; i++) 1271 for (i = 0; i < tp->nr_args; i++)
1272 entry->args[i] = call_fetch(&tp->args[i].fetch, regs); 1272 entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
1273 1273
1274 ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags); 1274 perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
1275 irq_flags, regs);
1275} 1276}
1276 1277
1277static int probe_profile_enable(struct ftrace_event_call *call) 1278static int probe_perf_enable(struct ftrace_event_call *call)
1278{ 1279{
1279 struct trace_probe *tp = (struct trace_probe *)call->data; 1280 struct trace_probe *tp = (struct trace_probe *)call->data;
1280 1281
@@ -1286,7 +1287,7 @@ static int probe_profile_enable(struct ftrace_event_call *call)
1286 return enable_kprobe(&tp->rp.kp); 1287 return enable_kprobe(&tp->rp.kp);
1287} 1288}
1288 1289
1289static void probe_profile_disable(struct ftrace_event_call *call) 1290static void probe_perf_disable(struct ftrace_event_call *call)
1290{ 1291{
1291 struct trace_probe *tp = (struct trace_probe *)call->data; 1292 struct trace_probe *tp = (struct trace_probe *)call->data;
1292 1293
@@ -1311,7 +1312,7 @@ int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
1311 kprobe_trace_func(kp, regs); 1312 kprobe_trace_func(kp, regs);
1312#ifdef CONFIG_PERF_EVENTS 1313#ifdef CONFIG_PERF_EVENTS
1313 if (tp->flags & TP_FLAG_PROFILE) 1314 if (tp->flags & TP_FLAG_PROFILE)
1314 kprobe_profile_func(kp, regs); 1315 kprobe_perf_func(kp, regs);
1315#endif 1316#endif
1316 return 0; /* We don't tweak the kernel, so just return 0 */ 1317 return 0; /* We don't tweak the kernel, so just return 0 */
1317} 1318}
@@ -1325,7 +1326,7 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
1325 kretprobe_trace_func(ri, regs); 1326 kretprobe_trace_func(ri, regs);
1326#ifdef CONFIG_PERF_EVENTS 1327#ifdef CONFIG_PERF_EVENTS
1327 if (tp->flags & TP_FLAG_PROFILE) 1328 if (tp->flags & TP_FLAG_PROFILE)
1328 kretprobe_profile_func(ri, regs); 1329 kretprobe_perf_func(ri, regs);
1329#endif 1330#endif
1330 return 0; /* We don't tweak the kernel, so just return 0 */ 1331 return 0; /* We don't tweak the kernel, so just return 0 */
1331} 1332}
@@ -1358,8 +1359,8 @@ static int register_probe_event(struct trace_probe *tp)
1358 call->unregfunc = probe_event_disable; 1359 call->unregfunc = probe_event_disable;
1359 1360
1360#ifdef CONFIG_PERF_EVENTS 1361#ifdef CONFIG_PERF_EVENTS
1361 call->profile_enable = probe_profile_enable; 1362 call->perf_event_enable = probe_perf_enable;
1362 call->profile_disable = probe_profile_disable; 1363 call->perf_event_disable = probe_perf_disable;
1363#endif 1364#endif
1364 call->data = tp; 1365 call->data = tp;
1365 ret = trace_add_event_call(call); 1366 ret = trace_add_event_call(call);
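Both perf handlers above size their record with size = ALIGN(__size + sizeof(u32), sizeof(u64)) - sizeof(u32): perf's raw samples carry a u32 size header in front of the payload, so the record is padded until that u32 plus the record together end on a u64 boundary. A quick self-checking sketch of the arithmetic (plain C, no kernel headers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	for (uint64_t __size = 1; __size <= 64; __size++) {
		uint64_t size = ALIGN(__size + sizeof(uint32_t), sizeof(uint64_t))
				- sizeof(uint32_t);

		assert(size >= __size);				/* never shrinks the record */
		assert((size + sizeof(uint32_t)) % 8 == 0);	/* u32 prefix + record is u64-aligned */
	}
	printf("ok\n");
	return 0;
}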
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c
index 94103cdcf9d8..d59cd6879477 100644
--- a/kernel/trace/trace_ksym.c
+++ b/kernel/trace/trace_ksym.c
@@ -23,6 +23,7 @@
23#include <linux/debugfs.h> 23#include <linux/debugfs.h>
24#include <linux/ftrace.h> 24#include <linux/ftrace.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/slab.h>
26#include <linux/fs.h> 27#include <linux/fs.h>
27 28
28#include "trace_output.h" 29#include "trace_output.h"
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index 0acd834659ed..017fa376505d 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10#include <linux/mmiotrace.h> 10#include <linux/mmiotrace.h>
11#include <linux/pci.h> 11#include <linux/pci.h>
12#include <linux/slab.h>
12#include <linux/time.h> 13#include <linux/time.h>
13 14
14#include <asm/atomic.h> 15#include <asm/atomic.h>
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 280fea470d67..81003b4d617f 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -3,6 +3,7 @@
3#include <linux/stringify.h> 3#include <linux/stringify.h>
4#include <linux/kthread.h> 4#include <linux/kthread.h>
5#include <linux/delay.h> 5#include <linux/delay.h>
6#include <linux/slab.h>
6 7
7static inline int trace_valid_entry(struct trace_entry *entry) 8static inline int trace_valid_entry(struct trace_entry *entry)
8{ 9{
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index a4bb239eb987..96cffb269e73 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -10,6 +10,7 @@
10 10
11 11
12#include <linux/list.h> 12#include <linux/list.h>
13#include <linux/slab.h>
13#include <linux/rbtree.h> 14#include <linux/rbtree.h>
14#include <linux/debugfs.h> 15#include <linux/debugfs.h>
15#include "trace_stat.h" 16#include "trace_stat.h"
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index cba47d7935cc..4d6d711717f2 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -1,5 +1,6 @@
1#include <trace/syscall.h> 1#include <trace/syscall.h>
2#include <trace/events/syscalls.h> 2#include <trace/events/syscalls.h>
3#include <linux/slab.h>
3#include <linux/kernel.h> 4#include <linux/kernel.h>
4#include <linux/ftrace.h> 5#include <linux/ftrace.h>
5#include <linux/perf_event.h> 6#include <linux/perf_event.h>
@@ -428,12 +429,12 @@ core_initcall(init_ftrace_syscalls);
428 429
429#ifdef CONFIG_PERF_EVENTS 430#ifdef CONFIG_PERF_EVENTS
430 431
431static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls); 432static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
432static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls); 433static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
433static int sys_prof_refcount_enter; 434static int sys_perf_refcount_enter;
434static int sys_prof_refcount_exit; 435static int sys_perf_refcount_exit;
435 436
436static void prof_syscall_enter(struct pt_regs *regs, long id) 437static void perf_syscall_enter(struct pt_regs *regs, long id)
437{ 438{
438 struct syscall_metadata *sys_data; 439 struct syscall_metadata *sys_data;
439 struct syscall_trace_enter *rec; 440 struct syscall_trace_enter *rec;
@@ -443,7 +444,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
443 int size; 444 int size;
444 445
445 syscall_nr = syscall_get_nr(current, regs); 446 syscall_nr = syscall_get_nr(current, regs);
446 if (!test_bit(syscall_nr, enabled_prof_enter_syscalls)) 447 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
447 return; 448 return;
448 449
449 sys_data = syscall_nr_to_meta(syscall_nr); 450 sys_data = syscall_nr_to_meta(syscall_nr);
@@ -455,11 +456,11 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
455 size = ALIGN(size + sizeof(u32), sizeof(u64)); 456 size = ALIGN(size + sizeof(u32), sizeof(u64));
456 size -= sizeof(u32); 457 size -= sizeof(u32);
457 458
458 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 459 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
459 "profile buffer not large enough")) 460 "perf buffer not large enough"))
460 return; 461 return;
461 462
462 rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size, 463 rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
463 sys_data->enter_event->id, &rctx, &flags); 464 sys_data->enter_event->id, &rctx, &flags);
464 if (!rec) 465 if (!rec)
465 return; 466 return;
@@ -467,10 +468,10 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
467 rec->nr = syscall_nr; 468 rec->nr = syscall_nr;
468 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 469 syscall_get_arguments(current, regs, 0, sys_data->nb_args,
469 (unsigned long *)&rec->args); 470 (unsigned long *)&rec->args);
470 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); 471 perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
471} 472}
472 473
473int prof_sysenter_enable(struct ftrace_event_call *call) 474int perf_sysenter_enable(struct ftrace_event_call *call)
474{ 475{
475 int ret = 0; 476 int ret = 0;
476 int num; 477 int num;
@@ -478,34 +479,34 @@ int prof_sysenter_enable(struct ftrace_event_call *call)
478 num = ((struct syscall_metadata *)call->data)->syscall_nr; 479 num = ((struct syscall_metadata *)call->data)->syscall_nr;
479 480
480 mutex_lock(&syscall_trace_lock); 481 mutex_lock(&syscall_trace_lock);
481 if (!sys_prof_refcount_enter) 482 if (!sys_perf_refcount_enter)
482 ret = register_trace_sys_enter(prof_syscall_enter); 483 ret = register_trace_sys_enter(perf_syscall_enter);
483 if (ret) { 484 if (ret) {
484 pr_info("event trace: Could not activate " 485 pr_info("event trace: Could not activate "
485 "syscall entry trace point"); 486 "syscall entry trace point");
486 } else { 487 } else {
487 set_bit(num, enabled_prof_enter_syscalls); 488 set_bit(num, enabled_perf_enter_syscalls);
488 sys_prof_refcount_enter++; 489 sys_perf_refcount_enter++;
489 } 490 }
490 mutex_unlock(&syscall_trace_lock); 491 mutex_unlock(&syscall_trace_lock);
491 return ret; 492 return ret;
492} 493}
493 494
494void prof_sysenter_disable(struct ftrace_event_call *call) 495void perf_sysenter_disable(struct ftrace_event_call *call)
495{ 496{
496 int num; 497 int num;
497 498
498 num = ((struct syscall_metadata *)call->data)->syscall_nr; 499 num = ((struct syscall_metadata *)call->data)->syscall_nr;
499 500
500 mutex_lock(&syscall_trace_lock); 501 mutex_lock(&syscall_trace_lock);
501 sys_prof_refcount_enter--; 502 sys_perf_refcount_enter--;
502 clear_bit(num, enabled_prof_enter_syscalls); 503 clear_bit(num, enabled_perf_enter_syscalls);
503 if (!sys_prof_refcount_enter) 504 if (!sys_perf_refcount_enter)
504 unregister_trace_sys_enter(prof_syscall_enter); 505 unregister_trace_sys_enter(perf_syscall_enter);
505 mutex_unlock(&syscall_trace_lock); 506 mutex_unlock(&syscall_trace_lock);
506} 507}
507 508
508static void prof_syscall_exit(struct pt_regs *regs, long ret) 509static void perf_syscall_exit(struct pt_regs *regs, long ret)
509{ 510{
510 struct syscall_metadata *sys_data; 511 struct syscall_metadata *sys_data;
511 struct syscall_trace_exit *rec; 512 struct syscall_trace_exit *rec;
@@ -515,7 +516,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
515 int size; 516 int size;
516 517
517 syscall_nr = syscall_get_nr(current, regs); 518 syscall_nr = syscall_get_nr(current, regs);
518 if (!test_bit(syscall_nr, enabled_prof_exit_syscalls)) 519 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
519 return; 520 return;
520 521
521 sys_data = syscall_nr_to_meta(syscall_nr); 522 sys_data = syscall_nr_to_meta(syscall_nr);
@@ -530,11 +531,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
530 * Impossible, but be paranoid about the future 531 * Impossible, but be paranoid about the future
531 * How to put this check outside runtime? 532 * How to put this check outside runtime?
532 */ 533 */
533 if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE, 534 if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
534 "exit event has grown above profile buffer size")) 535 "exit event has grown above perf buffer size"))
535 return; 536 return;
536 537
537 rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size, 538 rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
538 sys_data->exit_event->id, &rctx, &flags); 539 sys_data->exit_event->id, &rctx, &flags);
539 if (!rec) 540 if (!rec)
540 return; 541 return;
@@ -542,10 +543,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
542 rec->nr = syscall_nr; 543 rec->nr = syscall_nr;
543 rec->ret = syscall_get_return_value(current, regs); 544 rec->ret = syscall_get_return_value(current, regs);
544 545
545 ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags); 546 perf_trace_buf_submit(rec, size, rctx, 0, 1, flags, regs);
546} 547}
547 548
548int prof_sysexit_enable(struct ftrace_event_call *call) 549int perf_sysexit_enable(struct ftrace_event_call *call)
549{ 550{
550 int ret = 0; 551 int ret = 0;
551 int num; 552 int num;
@@ -553,30 +554,30 @@ int prof_sysexit_enable(struct ftrace_event_call *call)
553 num = ((struct syscall_metadata *)call->data)->syscall_nr; 554 num = ((struct syscall_metadata *)call->data)->syscall_nr;
554 555
555 mutex_lock(&syscall_trace_lock); 556 mutex_lock(&syscall_trace_lock);
556 if (!sys_prof_refcount_exit) 557 if (!sys_perf_refcount_exit)
557 ret = register_trace_sys_exit(prof_syscall_exit); 558 ret = register_trace_sys_exit(perf_syscall_exit);
558 if (ret) { 559 if (ret) {
559 pr_info("event trace: Could not activate " 560 pr_info("event trace: Could not activate "
560 "syscall exit trace point"); 561 "syscall exit trace point");
561 } else { 562 } else {
562 set_bit(num, enabled_prof_exit_syscalls); 563 set_bit(num, enabled_perf_exit_syscalls);
563 sys_prof_refcount_exit++; 564 sys_perf_refcount_exit++;
564 } 565 }
565 mutex_unlock(&syscall_trace_lock); 566 mutex_unlock(&syscall_trace_lock);
566 return ret; 567 return ret;
567} 568}
568 569
569void prof_sysexit_disable(struct ftrace_event_call *call) 570void perf_sysexit_disable(struct ftrace_event_call *call)
570{ 571{
571 int num; 572 int num;
572 573
573 num = ((struct syscall_metadata *)call->data)->syscall_nr; 574 num = ((struct syscall_metadata *)call->data)->syscall_nr;
574 575
575 mutex_lock(&syscall_trace_lock); 576 mutex_lock(&syscall_trace_lock);
576 sys_prof_refcount_exit--; 577 sys_perf_refcount_exit--;
577 clear_bit(num, enabled_prof_exit_syscalls); 578 clear_bit(num, enabled_perf_exit_syscalls);
578 if (!sys_prof_refcount_exit) 579 if (!sys_perf_refcount_exit)
579 unregister_trace_sys_exit(prof_syscall_exit); 580 unregister_trace_sys_exit(perf_syscall_exit);
580 mutex_unlock(&syscall_trace_lock); 581 mutex_unlock(&syscall_trace_lock);
581} 582}
582 583
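The syscall conversion combines two gates: sys_perf_refcount_enter/exit count how many events share the single registered tracepoint handler, and the enabled_perf_*_syscalls bitmaps let that handler bail out early for syscalls nobody asked to trace. A condensed userspace sketch of the bitmap gate, with BITS_PER_LONG computed portably and illustrative names:

#include <stdbool.h>
#include <stdio.h>

#define NR_SYSCALLS	512
#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))

static unsigned long enabled[NR_SYSCALLS / BITS_PER_LONG + 1];
static int refcount;	/* events currently sharing the handler */

static void set_bit_(int nr)   { enabled[nr / BITS_PER_LONG] |=  1UL << (nr % BITS_PER_LONG); }
static void clear_bit_(int nr) { enabled[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG)); }
static bool test_bit_(int nr)  { return enabled[nr / BITS_PER_LONG] &  (1UL << (nr % BITS_PER_LONG)); }

/* The one handler registered for every syscall entry: */
static void syscall_enter(int syscall_nr)
{
	if (!test_bit_(syscall_nr))
		return;			/* fast path for untraced syscalls */
	printf("record syscall %d\n", syscall_nr);
}

static void enable_syscall(int nr)
{
	if (!refcount) {
		/* here the kernel registers the shared tracepoint handler */
	}
	set_bit_(nr);
	refcount++;
}

static void disable_syscall(int nr)
{
	refcount--;
	clear_bit_(nr);
	if (!refcount) {
		/* here the kernel unregisters the shared handler */
	}
}

int main(void)
{
	enable_syscall(1);
	syscall_enter(1);	/* recorded */
	syscall_enter(2);	/* ignored */
	disable_syscall(1);
	return 0;
}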
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index 40cafb07dffd..cc2d2faa7d9e 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -9,6 +9,7 @@
9#include <trace/events/workqueue.h> 9#include <trace/events/workqueue.h>
10#include <linux/list.h> 10#include <linux/list.h>
11#include <linux/percpu.h> 11#include <linux/percpu.h>
12#include <linux/slab.h>
12#include <linux/kref.h> 13#include <linux/kref.h>
13#include "trace_stat.h" 14#include "trace_stat.h"
14#include "trace.h" 15#include "trace.h"
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index dee48658805c..5bfb213984b2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -774,7 +774,7 @@ void flush_delayed_work(struct delayed_work *dwork)
774{ 774{
775 if (del_timer_sync(&dwork->timer)) { 775 if (del_timer_sync(&dwork->timer)) {
776 struct cpu_workqueue_struct *cwq; 776 struct cpu_workqueue_struct *cwq;
777 cwq = wq_per_cpu(keventd_wq, get_cpu()); 777 cwq = wq_per_cpu(get_wq_data(&dwork->work)->wq, get_cpu());
778 __queue_work(cwq, &dwork->work); 778 __queue_work(cwq, &dwork->work);
779 put_cpu(); 779 put_cpu();
780 } 780 }
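Finally, the flush_delayed_work() hunk fixes a requeue-to-the-wrong-queue bug: when the delayed timer is cancelled early, the pending work must be pushed onto the workqueue it was originally queued on, which the work item itself records, not onto the global keventd_wq behind schedule_work(). The shape of the fix, reduced to its essentials (hypothetical structs, not the kernel's):

/* Hypothetical reduction of the flush_delayed_work() change. */
#include <stdio.h>

struct workqueue {
	const char *name;
};

struct work {
	struct workqueue *wq;	/* recorded when the work was first queued */
};

struct delayed_work {
	struct work work;
	int timer_pending;
};

static void queue_now(struct workqueue *wq, struct work *w)
{
	printf("queueing on %s\n", wq->name);
	(void)w;
}

static void flush_delayed_work(struct delayed_work *dwork)
{
	if (dwork->timer_pending) {
		dwork->timer_pending = 0;
		/*
		 * Before the fix this effectively said
		 * queue_now(&keventd_wq, ...), silently migrating work
		 * from a private workqueue onto the global one. Requeue
		 * on the recorded owner instead:
		 */
		queue_now(dwork->work.wq, &dwork->work);
	}
}

int main(void)
{
	struct workqueue my_wq = { "my_wq" };
	struct delayed_work dw = { { &my_wq }, 1 };

	flush_delayed_work(&dw);	/* prints "queueing on my_wq" */
	return 0;
}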