path: root/kernel
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                        |    1
-rw-r--r--  kernel/async.c                         |   18
-rw-r--r--  kernel/auditsc.c                       |    1
-rw-r--r--  kernel/cgroup.c                        |  435
-rw-r--r--  kernel/cgroup_debug.c                  |    2
-rw-r--r--  kernel/cpu.c                           |    6
-rw-r--r--  kernel/cpuset.c                        |  254
-rw-r--r--  kernel/exec_domain.c                   |   23
-rw-r--r--  kernel/exit.c                          |  250
-rw-r--r--  kernel/fork.c                          |   77
-rw-r--r--  kernel/futex.c                         |  201
-rw-r--r--  kernel/irq/Makefile                    |    1
-rw-r--r--  kernel/irq/chip.c                      |   12
-rw-r--r--  kernel/irq/handle.c                    |   93
-rw-r--r--  kernel/irq/internals.h                 |   10
-rw-r--r--  kernel/irq/manage.c                    |  235
-rw-r--r--  kernel/irq/migration.c                 |   12
-rw-r--r--  kernel/irq/numa_migrate.c              |   30
-rw-r--r--  kernel/irq/pm.c                        |   79
-rw-r--r--  kernel/irq/proc.c                      |    4
-rw-r--r--  kernel/irq/spurious.c                  |   14
-rw-r--r--  kernel/kexec.c                         |   24
-rw-r--r--  kernel/kmod.c                          |    2
-rw-r--r--  kernel/kthread.c                       |    4
-rw-r--r--  kernel/latencytop.c                    |   83
-rw-r--r--  kernel/lockdep.c                       |  538
-rw-r--r--  kernel/lockdep_internals.h             |   45
-rw-r--r--  kernel/lockdep_proc.c                  |   22
-rw-r--r--  kernel/lockdep_states.h                |    9
-rw-r--r--  kernel/module.c                        |   89
-rw-r--r--  kernel/mutex-debug.c                   |    9
-rw-r--r--  kernel/mutex-debug.h                   |   18
-rw-r--r--  kernel/mutex.c                         |  121
-rw-r--r--  kernel/mutex.h                         |   22
-rw-r--r--  kernel/ns_cgroup.c                     |   14
-rw-r--r--  kernel/panic.c                         |  123
-rw-r--r--  kernel/pid.c                           |   33
-rw-r--r--  kernel/pid_namespace.c                 |   15
-rw-r--r--  kernel/posix-cpu-timers.c              |    3
-rw-r--r--  kernel/power/disk.c                    |  143
-rw-r--r--  kernel/power/main.c                    |   55
-rw-r--r--  kernel/power/snapshot.c                |    9
-rw-r--r--  kernel/power/swsusp.c                  |   18
-rw-r--r--  kernel/printk.c                        |   19
-rw-r--r--  kernel/ptrace.c                        |  101
-rw-r--r--  kernel/rcupdate.c                      |   44
-rw-r--r--  kernel/rcutorture.c                    |   25
-rw-r--r--  kernel/relay.c                         |   10
-rw-r--r--  kernel/sched.c                         | 1090
-rw-r--r--  kernel/sched_clock.c                   |   30
-rw-r--r--  kernel/sched_cpupri.h                  |    2
-rw-r--r--  kernel/sched_debug.c                   |    8
-rw-r--r--  kernel/sched_fair.c                    |   59
-rw-r--r--  kernel/sched_features.h                |    4
-rw-r--r--  kernel/sched_rt.c                      |  569
-rw-r--r--  kernel/sched_stats.h                   |    7
-rw-r--r--  kernel/signal.c                        |   71
-rw-r--r--  kernel/slow-work.c                     |  640
-rw-r--r--  kernel/smp.c                           |  432
-rw-r--r--  kernel/softirq.c                       |   11
-rw-r--r--  kernel/spinlock.c                      |   18
-rw-r--r--  kernel/stop_machine.c                  |    4
-rw-r--r--  kernel/sys.c                           |    5
-rw-r--r--  kernel/sysctl.c                        |   26
-rw-r--r--  kernel/sysctl_check.c                  |    1
-rw-r--r--  kernel/time/Makefile                   |    2
-rw-r--r--  kernel/time/clockevents.c              |   20
-rw-r--r--  kernel/time/clocksource.c              |   76
-rw-r--r--  kernel/time/ntp.c                      |  444
-rw-r--r--  kernel/time/timecompare.c              |  191
-rw-r--r--  kernel/timer.c                         |  178
-rw-r--r--  kernel/trace/Kconfig                   |    9
-rw-r--r--  kernel/trace/ftrace.c                  |    2
-rw-r--r--  kernel/trace/trace_functions_graph.c   |   75
-rw-r--r--  kernel/user.c                          |    2
-rw-r--r--  kernel/utsname_sysctl.c                |    2
-rw-r--r--  kernel/workqueue.c                     |   47
77 files changed, 5088 insertions(+), 2293 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index e4791b3ba55d..bab1dffe37e9 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -93,6 +93,7 @@ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace/
 obj-$(CONFIG_TRACING) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
+obj-$(CONFIG_SLOW_WORK) += slow-work.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
diff --git a/kernel/async.c b/kernel/async.c
index f565891f2c9b..968ef9457d4e 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -49,6 +49,7 @@ asynchronous and synchronous parts of the kernel.
  */
 
 #include <linux/async.h>
+#include <linux/bug.h>
 #include <linux/module.h>
 #include <linux/wait.h>
 #include <linux/sched.h>
@@ -387,20 +388,11 @@ static int async_manager_thread(void *unused)
 
 static int __init async_init(void)
 {
-	if (async_enabled)
-		if (IS_ERR(kthread_run(async_manager_thread, NULL,
-				       "async/mgr")))
-			async_enabled = 0;
-	return 0;
-}
+	async_enabled =
+		!IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));
 
-static int __init setup_async(char *str)
-{
-	async_enabled = 1;
-	return 1;
-}
+	WARN_ON(!async_enabled);
+	return 0;
+}
 
-__setup("fastboot", setup_async);
-
-
 core_initcall(async_init);
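
The rewritten async_init() leans on the kernel's error-pointer convention: kthread_run() returns either a valid task_struct pointer or a small negative errno encoded in the pointer itself, which IS_ERR() detects, so the old four-line dance collapses into a single assignment. Below is a minimal userspace sketch of that convention, modelled on include/linux/err.h; the main() and its values are illustrative only, not part of the patch.

#include <stdio.h>

#define MAX_ERRNO	4095

/* Encode a negative errno in a pointer, as include/linux/err.h does:
 * the top 4095 addresses are reserved for error codes. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int dummy;
	void *ok = &dummy;		/* a real pointer */
	void *bad = ERR_PTR(-12);	/* -ENOMEM */

	/* Mirrors: async_enabled = !IS_ERR(kthread_run(...)); */
	printf("ok:  enabled=%d\n", !IS_ERR(ok));
	printf("bad: enabled=%d (err=%ld)\n", !IS_ERR(bad), PTR_ERR(bad));
	return 0;
}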
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 8cbddff6c283..2bfc64786765 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -66,6 +66,7 @@
 #include <linux/syscalls.h>
 #include <linux/inotify.h>
 #include <linux/capability.h>
+#include <linux/fs_struct.h>
 
 #include "audit.h"
 
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 9edb5c4b79b4..382109b5baeb 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -94,7 +94,6 @@ struct cgroupfs_root {
 	char release_agent_path[PATH_MAX];
 };
 
-
 /*
  * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the
  * subsystems that are otherwise unattached - it never has more than a
@@ -102,6 +101,39 @@ struct cgroupfs_root {
  */
 static struct cgroupfs_root rootnode;
 
+/*
+ * CSS ID -- ID per subsys's Cgroup Subsys State (CSS), used only when
+ * cgroup_subsys->use_id != 0.
+ */
+#define CSS_ID_MAX	(65535)
+struct css_id {
+	/*
+	 * The css to which this ID points. This pointer is set to a valid
+	 * value after the cgroup is populated. If the cgroup is removed,
+	 * this will be NULL. This pointer is expected to be RCU-safe
+	 * because destroy() is called after synchronize_rcu(). But for safe
+	 * use, css_is_removed() or css_tryget() should be used to avoid races.
+	 */
+	struct cgroup_subsys_state *css;
+	/*
+	 * ID of this css.
+	 */
+	unsigned short id;
+	/*
+	 * Depth in hierarchy which this ID belongs to.
+	 */
+	unsigned short depth;
+	/*
+	 * ID is freed by RCU. (and lookup routine is RCU safe.)
+	 */
+	struct rcu_head rcu_head;
+	/*
+	 * Hierarchy this CSS ID belongs to.
+	 */
+	unsigned short stack[0]; /* Array of length (depth+1) */
+};
+
+
 /* The list of hierarchy roots */
 
 static LIST_HEAD(roots);
@@ -185,6 +217,8 @@ struct cg_cgroup_link {
 static struct css_set init_css_set;
 static struct cg_cgroup_link init_css_set_link;
 
+static int cgroup_subsys_init_idr(struct cgroup_subsys *ss);
+
 /* css_set_lock protects the list of css_set objects, and the
  * chain of tasks off each css_set. Nests outside task->alloc_lock
  * due to cgroup_iter_start() */
@@ -567,6 +601,9 @@ static struct backing_dev_info cgroup_backing_dev_info = {
 	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
+static int alloc_css_id(struct cgroup_subsys *ss,
+			struct cgroup *parent, struct cgroup *child);
+
 static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
 {
 	struct inode *inode = new_inode(sb);
@@ -585,13 +622,18 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
  * Call subsys's pre_destroy handler.
  * This is called before css refcnt check.
  */
-static void cgroup_call_pre_destroy(struct cgroup *cgrp)
+static int cgroup_call_pre_destroy(struct cgroup *cgrp)
 {
 	struct cgroup_subsys *ss;
+	int ret = 0;
+
 	for_each_subsys(cgrp->root, ss)
-		if (ss->pre_destroy)
-			ss->pre_destroy(ss, cgrp);
-	return;
+		if (ss->pre_destroy) {
+			ret = ss->pre_destroy(ss, cgrp);
+			if (ret)
+				break;
+		}
+	return ret;
 }
 
 static void free_cgroup_rcu(struct rcu_head *obj)
@@ -685,6 +727,22 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
 	remove_dir(dentry);
 }
 
+/*
+ * A queue for waiters to do rmdir() cgroup. A task will sleep when
+ * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some
+ * reference to css->refcnt. In general, this refcnt is expected to go down
+ * to zero, soon.
+ *
+ * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex;
+ */
+DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq);
+
+static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp)
+{
+	if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags)))
+		wake_up_all(&cgroup_rmdir_waitq);
+}
+
 static int rebind_subsystems(struct cgroupfs_root *root,
 			     unsigned long final_bits)
 {
@@ -857,16 +915,16 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
 	}
 
 	ret = rebind_subsystems(root, opts.subsys_bits);
+	if (ret)
+		goto out_unlock;
 
 	/* (re)populate subsystem files */
-	if (!ret)
-		cgroup_populate_dir(cgrp);
+	cgroup_populate_dir(cgrp);
 
 	if (opts.release_agent)
 		strcpy(root->release_agent_path, opts.release_agent);
  out_unlock:
-	if (opts.release_agent)
-		kfree(opts.release_agent);
+	kfree(opts.release_agent);
 	mutex_unlock(&cgroup_mutex);
 	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
 	return ret;
@@ -969,15 +1027,13 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 	/* First find the desired set of subsystems */
 	ret = parse_cgroupfs_options(data, &opts);
 	if (ret) {
-		if (opts.release_agent)
-			kfree(opts.release_agent);
+		kfree(opts.release_agent);
 		return ret;
 	}
 
 	root = kzalloc(sizeof(*root), GFP_KERNEL);
 	if (!root) {
-		if (opts.release_agent)
-			kfree(opts.release_agent);
+		kfree(opts.release_agent);
 		return -ENOMEM;
 	}
 
@@ -1071,7 +1127,8 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 		mutex_unlock(&cgroup_mutex);
 	}
 
-	return simple_set_mnt(mnt, sb);
+	simple_set_mnt(mnt, sb);
+	return 0;
 
  free_cg_links:
 	free_cg_links(&tmp_cg_links);
@@ -1279,6 +1336,12 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
 	synchronize_rcu();
 	put_css_set(cg);
+
+	/*
+	 * wake up rmdir() waiter. the rmdir should fail since the cgroup
+	 * is no longer empty.
+	 */
+	cgroup_wakeup_rmdir_waiters(cgrp);
 	return 0;
 }
 
@@ -1624,10 +1687,10 @@ static struct inode_operations cgroup_dir_inode_operations = {
 	.rename = cgroup_rename,
 };
 
-static int cgroup_create_file(struct dentry *dentry, int mode,
+static int cgroup_create_file(struct dentry *dentry, mode_t mode,
 			      struct super_block *sb)
 {
-	static struct dentry_operations cgroup_dops = {
+	static const struct dentry_operations cgroup_dops = {
 		.d_iput = cgroup_diput,
 	};
 
@@ -1670,7 +1733,7 @@ static int cgroup_create_file(struct dentry *dentry, int mode,
  * @mode: mode to set on new directory.
  */
 static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
-			     int mode)
+			     mode_t mode)
 {
 	struct dentry *parent;
 	int error = 0;
@@ -1688,6 +1751,33 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
 	return error;
 }
 
+/**
+ * cgroup_file_mode - deduce file mode of a control file
+ * @cft: the control file in question
+ *
+ * returns cft->mode if ->mode is not 0
+ * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
+ * returns S_IRUGO if it has only a read handler
+ * returns S_IWUSR if it has only a write handler
+ */
+static mode_t cgroup_file_mode(const struct cftype *cft)
+{
+	mode_t mode = 0;
+
+	if (cft->mode)
+		return cft->mode;
+
+	if (cft->read || cft->read_u64 || cft->read_s64 ||
+	    cft->read_map || cft->read_seq_string)
+		mode |= S_IRUGO;
+
+	if (cft->write || cft->write_u64 || cft->write_s64 ||
+	    cft->write_string || cft->trigger)
+		mode |= S_IWUSR;
+
+	return mode;
+}
+
 int cgroup_add_file(struct cgroup *cgrp,
 		    struct cgroup_subsys *subsys,
 		    const struct cftype *cft)
@@ -1695,6 +1785,7 @@ int cgroup_add_file(struct cgroup *cgrp,
 	struct dentry *dir = cgrp->dentry;
 	struct dentry *dentry;
 	int error;
+	mode_t mode;
 
 	char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
 	if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) {
@@ -1705,7 +1796,8 @@ int cgroup_add_file(struct cgroup *cgrp,
 	BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex));
 	dentry = lookup_one_len(name, dir, strlen(name));
 	if (!IS_ERR(dentry)) {
-		error = cgroup_create_file(dentry, 0644 | S_IFREG,
+		mode = cgroup_file_mode(cft);
+		error = cgroup_create_file(dentry, mode | S_IFREG,
 					   cgrp->root->sb);
 		if (!error)
 			dentry->d_fsdata = (void *)cft;
@@ -2287,6 +2379,7 @@ static struct cftype files[] = {
 		.write_u64 = cgroup_tasks_write,
 		.release = cgroup_tasks_release,
 		.private = FILE_TASKLIST,
+		.mode = S_IRUGO | S_IWUSR,
 	},
 
 	{
@@ -2326,6 +2419,17 @@ static int cgroup_populate_dir(struct cgroup *cgrp)
 		if (ss->populate && (err = ss->populate(ss, cgrp)) < 0)
 			return err;
 	}
+	/* This cgroup is ready now */
+	for_each_subsys(cgrp->root, ss) {
+		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+		/*
+		 * Update id->css pointer and make this css visible from
+		 * CSS ID functions. This pointer will be dereferenced
+		 * from RCU-read-side without locks.
+		 */
+		if (css->id)
+			rcu_assign_pointer(css->id->css, css);
+	}
 
 	return 0;
 }
@@ -2337,6 +2441,7 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
 	css->cgroup = cgrp;
 	atomic_set(&css->refcnt, 1);
 	css->flags = 0;
+	css->id = NULL;
 	if (cgrp == dummytop)
 		set_bit(CSS_ROOT, &css->flags);
 	BUG_ON(cgrp->subsys[ss->subsys_id]);
@@ -2375,7 +2480,7 @@ static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
  * Must be called with the mutex on the parent inode held
  */
 static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
-			  int mode)
+			  mode_t mode)
 {
 	struct cgroup *cgrp;
 	struct cgroupfs_root *root = parent->root;
@@ -2412,6 +2517,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 			goto err_destroy;
 		}
 		init_cgroup_css(css, ss, cgrp);
+		if (ss->use_id)
+			if (alloc_css_id(ss, parent, cgrp))
+				goto err_destroy;
+		/* At error, ->destroy() callback has to free assigned ID. */
 	}
 
 	cgroup_lock_hierarchy(root);
@@ -2554,9 +2663,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	struct cgroup *cgrp = dentry->d_fsdata;
 	struct dentry *d;
 	struct cgroup *parent;
+	DEFINE_WAIT(wait);
+	int ret;
 
 	/* the vfs holds both inode->i_mutex already */
-
+again:
 	mutex_lock(&cgroup_mutex);
 	if (atomic_read(&cgrp->count) != 0) {
 		mutex_unlock(&cgroup_mutex);
@@ -2572,17 +2683,39 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
 	 * Call pre_destroy handlers of subsys. Notify subsystems
 	 * that rmdir() request comes.
 	 */
-	cgroup_call_pre_destroy(cgrp);
+	ret = cgroup_call_pre_destroy(cgrp);
+	if (ret)
+		return ret;
 
 	mutex_lock(&cgroup_mutex);
 	parent = cgrp->parent;
-
-	if (atomic_read(&cgrp->count)
-	    || !list_empty(&cgrp->children)
-	    || !cgroup_clear_css_refs(cgrp)) {
+	if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) {
 		mutex_unlock(&cgroup_mutex);
 		return -EBUSY;
 	}
+	/*
+	 * css_put/get is provided for subsys to grab refcnt to css. In typical
+	 * case, subsystem has no reference after pre_destroy(). But, under
+	 * hierarchy management, some *temporal* refcnt can be held.
+	 * To avoid returning -EBUSY to a user, waitqueue is used. If subsys
+	 * is really busy, it should return -EBUSY at pre_destroy(). wake_up
+	 * is called when css_put() is called and refcnt goes down to 0.
+	 */
+	set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+	prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE);
+
+	if (!cgroup_clear_css_refs(cgrp)) {
+		mutex_unlock(&cgroup_mutex);
+		schedule();
+		finish_wait(&cgroup_rmdir_waitq, &wait);
+		clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
+		if (signal_pending(current))
+			return -EINTR;
+		goto again;
+	}
+	/* NO css_tryget() can succeed after here. */
+	finish_wait(&cgroup_rmdir_waitq, &wait);
+	clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags);
 
 	spin_lock(&release_list_lock);
 	set_bit(CGRP_REMOVED, &cgrp->flags);
@@ -2707,6 +2840,8 @@ int __init cgroup_init(void)
 		struct cgroup_subsys *ss = subsys[i];
 		if (!ss->early_init)
 			cgroup_init_subsys(ss);
+		if (ss->use_id)
+			cgroup_subsys_init_idr(ss);
 	}
 
 	/* Add init_css_set to the hash table */
@@ -3083,18 +3218,19 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 }
 
 /**
- * cgroup_is_descendant - see if @cgrp is a descendant of current task's cgrp
+ * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp
  * @cgrp: the cgroup in question
+ * @task: the task in question
  *
- * See if @cgrp is a descendant of the current task's cgroup in
- * the appropriate hierarchy.
+ * See if @cgrp is a descendant of @task's cgroup in the appropriate
+ * hierarchy.
  *
  * If we are sending in dummytop, then presumably we are creating
  * the top cgroup in the subsystem.
  *
  * Called only by the ns (nsproxy) cgroup.
  */
-int cgroup_is_descendant(const struct cgroup *cgrp)
+int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task)
 {
 	int ret;
 	struct cgroup *target;
@@ -3104,7 +3240,7 @@ int cgroup_is_descendant(const struct cgroup *cgrp)
 		return 1;
 
 	get_first_subsys(cgrp, NULL, &subsys_id);
-	target = task_cgroup(current, subsys_id);
+	target = task_cgroup(task, subsys_id);
 	while (cgrp != target && cgrp!= cgrp->top_cgroup)
 		cgrp = cgrp->parent;
 	ret = (cgrp == target);
@@ -3137,10 +3273,12 @@ void __css_put(struct cgroup_subsys_state *css)
 {
 	struct cgroup *cgrp = css->cgroup;
 	rcu_read_lock();
-	if ((atomic_dec_return(&css->refcnt) == 1) &&
-	    notify_on_release(cgrp)) {
-		set_bit(CGRP_RELEASABLE, &cgrp->flags);
-		check_for_release(cgrp);
+	if (atomic_dec_return(&css->refcnt) == 1) {
+		if (notify_on_release(cgrp)) {
+			set_bit(CGRP_RELEASABLE, &cgrp->flags);
+			check_for_release(cgrp);
+		}
+		cgroup_wakeup_rmdir_waiters(cgrp);
 	}
 	rcu_read_unlock();
 }
@@ -3240,3 +3378,232 @@ static int __init cgroup_disable(char *str)
 	return 1;
 }
 __setup("cgroup_disable=", cgroup_disable);
+
+/*
+ * Functions for CSS ID.
+ */
+
+/*
+ * To get an ID other than 0, this should be called when !cgroup_is_removed().
+ */
+unsigned short css_id(struct cgroup_subsys_state *css)
+{
+	struct css_id *cssid = rcu_dereference(css->id);
+
+	if (cssid)
+		return cssid->id;
+	return 0;
+}
+
+unsigned short css_depth(struct cgroup_subsys_state *css)
+{
+	struct css_id *cssid = rcu_dereference(css->id);
+
+	if (cssid)
+		return cssid->depth;
+	return 0;
+}
+
+bool css_is_ancestor(struct cgroup_subsys_state *child,
+		     const struct cgroup_subsys_state *root)
+{
+	struct css_id *child_id = rcu_dereference(child->id);
+	struct css_id *root_id = rcu_dereference(root->id);
+
+	if (!child_id || !root_id || (child_id->depth < root_id->depth))
+		return false;
+	return child_id->stack[root_id->depth] == root_id->id;
+}
+
+static void __free_css_id_cb(struct rcu_head *head)
+{
+	struct css_id *id;
+
+	id = container_of(head, struct css_id, rcu_head);
+	kfree(id);
+}
+
+void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
+{
+	struct css_id *id = css->id;
+	/* When this is called before css_id initialization, id can be NULL */
+	if (!id)
+		return;
+
+	BUG_ON(!ss->use_id);
+
+	rcu_assign_pointer(id->css, NULL);
+	rcu_assign_pointer(css->id, NULL);
+	spin_lock(&ss->id_lock);
+	idr_remove(&ss->idr, id->id);
+	spin_unlock(&ss->id_lock);
+	call_rcu(&id->rcu_head, __free_css_id_cb);
+}
+
+/*
+ * This is called by init or create(). Then, calls to this function are
+ * always serialized (by cgroup_mutex at create()).
+ */
+
+static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
+{
+	struct css_id *newid;
+	int myid, error, size;
+
+	BUG_ON(!ss->use_id);
+
+	size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1);
+	newid = kzalloc(size, GFP_KERNEL);
+	if (!newid)
+		return ERR_PTR(-ENOMEM);
+	/* get id */
+	if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) {
+		error = -ENOMEM;
+		goto err_out;
+	}
+	spin_lock(&ss->id_lock);
+	/* Don't use 0. allocates an ID of 1-65535 */
+	error = idr_get_new_above(&ss->idr, newid, 1, &myid);
+	spin_unlock(&ss->id_lock);
+
+	/* Returns error when there is no free space for a new ID. */
+	if (error) {
+		error = -ENOSPC;
+		goto err_out;
+	}
+	if (myid > CSS_ID_MAX)
+		goto remove_idr;
+
+	newid->id = myid;
+	newid->depth = depth;
+	return newid;
+remove_idr:
+	error = -ENOSPC;
+	spin_lock(&ss->id_lock);
+	idr_remove(&ss->idr, myid);
+	spin_unlock(&ss->id_lock);
+err_out:
+	kfree(newid);
+	return ERR_PTR(error);
+
+}
+
+static int __init cgroup_subsys_init_idr(struct cgroup_subsys *ss)
+{
+	struct css_id *newid;
+	struct cgroup_subsys_state *rootcss;
+
+	spin_lock_init(&ss->id_lock);
+	idr_init(&ss->idr);
+
+	rootcss = init_css_set.subsys[ss->subsys_id];
+	newid = get_new_cssid(ss, 0);
+	if (IS_ERR(newid))
+		return PTR_ERR(newid);
+
+	newid->stack[0] = newid->id;
+	newid->css = rootcss;
+	rootcss->id = newid;
+	return 0;
+}
+
+static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
+			struct cgroup *child)
+{
+	int subsys_id, i, depth = 0;
+	struct cgroup_subsys_state *parent_css, *child_css;
+	struct css_id *child_id, *parent_id = NULL;
+
+	subsys_id = ss->subsys_id;
+	parent_css = parent->subsys[subsys_id];
+	child_css = child->subsys[subsys_id];
+	depth = css_depth(parent_css) + 1;
+	parent_id = parent_css->id;
+
+	child_id = get_new_cssid(ss, depth);
+	if (IS_ERR(child_id))
+		return PTR_ERR(child_id);
+
+	for (i = 0; i < depth; i++)
+		child_id->stack[i] = parent_id->stack[i];
+	child_id->stack[depth] = child_id->id;
+	/*
+	 * child_id->css pointer will be set after this cgroup is available
+	 * see cgroup_populate_dir()
+	 */
+	rcu_assign_pointer(child_css->id, child_id);
+
+	return 0;
+}
+
+/**
+ * css_lookup - lookup css by id
+ * @ss: cgroup subsys to be looked into.
+ * @id: the id
+ *
+ * Returns pointer to cgroup_subsys_state if there is a valid one with id.
+ * NULL if not. Should be called under rcu_read_lock()
+ */
+struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
+{
+	struct css_id *cssid = NULL;
+
+	BUG_ON(!ss->use_id);
+	cssid = idr_find(&ss->idr, id);
+
+	if (unlikely(!cssid))
+		return NULL;
+
+	return rcu_dereference(cssid->css);
+}
+
+/**
+ * css_get_next - lookup next cgroup under specified hierarchy.
+ * @ss: pointer to subsystem
+ * @id: current position of iteration.
+ * @root: pointer to css. search tree under this.
+ * @foundid: position of found object.
+ *
+ * Search next css under the specified hierarchy of rootid. Calling under
+ * rcu_read_lock() is necessary. Returns NULL if it reaches the end.
+ */
+struct cgroup_subsys_state *
+css_get_next(struct cgroup_subsys *ss, int id,
+	     struct cgroup_subsys_state *root, int *foundid)
+{
+	struct cgroup_subsys_state *ret = NULL;
+	struct css_id *tmp;
+	int tmpid;
+	int rootid = css_id(root);
+	int depth = css_depth(root);
+
+	if (!rootid)
+		return NULL;
+
+	BUG_ON(!ss->use_id);
+	/* fill start point for scan */
+	tmpid = id;
+	while (1) {
+		/*
+		 * scan next entry from bitmap(tree), tmpid is updated after
+		 * idr_get_next().
+		 */
+		spin_lock(&ss->id_lock);
+		tmp = idr_get_next(&ss->idr, &tmpid);
+		spin_unlock(&ss->id_lock);
+
+		if (!tmp)
+			break;
+		if (tmp->depth >= depth && tmp->stack[depth] == rootid) {
+			ret = rcu_dereference(tmp->css);
+			if (ret) {
+				*foundid = tmpid;
+				break;
+			}
+		}
+		/* continue to scan from next id */
+		tmpid = tmpid + 1;
+	}
+	return ret;
+}
+
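
Two mechanisms added above are worth unpacking. First, cgroup_rmdir()'s retry loop pairs with cgroup_wakeup_rmdir_waiters() in the classic wait-queue pattern: publish the waiter (set_bit + prepare_to_wait) before the final condition check, sleep only if the check still fails, then clean up and retry. Here is a stand-alone sketch of the same ordering, with POSIX primitives standing in for the kernel wait-queue API; all names are illustrative, not from the patch.

#include <pthread.h>
#include <stdbool.h>

/* Models cgroup_rmdir_waitq + CGRP_WAIT_ON_RMDIR: the waiter publishes
 * its presence before re-checking the condition, so a wake-up racing
 * with the check cannot be lost. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t rmdir_waitq = PTHREAD_COND_INITIALIZER;
static bool wait_on_rmdir;		/* CGRP_WAIT_ON_RMDIR */
static int css_refs = 1;		/* refs a subsystem still holds */

static void rmdir_wait_until_unused(void)	/* cgroup_rmdir() slow path */
{
	pthread_mutex_lock(&lock);
	while (css_refs != 0) {			/* clear_css_refs() failed */
		wait_on_rmdir = true;		/* set_bit(...) */
		pthread_cond_wait(&rmdir_waitq, &lock);	/* schedule() */
		wait_on_rmdir = false;		/* clear_bit + goto again */
	}
	pthread_mutex_unlock(&lock);
}

static void css_put_last(void)		/* __css_put(): refcnt hits zero */
{
	pthread_mutex_lock(&lock);
	css_refs = 0;
	if (wait_on_rmdir)		/* cgroup_wakeup_rmdir_waiters() */
		pthread_cond_broadcast(&rmdir_waitq);
	pthread_mutex_unlock(&lock);
}

Second, css_is_ancestor() is O(1) because every css_id carries its whole ancestor chain: an ID at depth d stores its ancestors' IDs in stack[0..d-1] and its own in stack[d], so root is an ancestor of child exactly when child->stack[root->depth] == root->id. A self-contained sketch of that invariant, stripped of the RCU and idr machinery (fixed-size stack, no bounds checking):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_id {
	unsigned short id;
	unsigned short depth;
	unsigned short stack[8];	/* ancestor ids; stack[depth] == id */
};

static struct demo_id *new_id(unsigned short id, const struct demo_id *parent)
{
	struct demo_id *n = calloc(1, sizeof(*n));

	n->id = id;
	n->depth = parent ? parent->depth + 1 : 0;
	if (parent)	/* copy stack[0..parent->depth], as alloc_css_id() does */
		memcpy(n->stack, parent->stack,
		       (parent->depth + 1) * sizeof(n->stack[0]));
	n->stack[n->depth] = id;
	return n;
}

static bool is_ancestor(const struct demo_id *child, const struct demo_id *root)
{
	if (child->depth < root->depth)
		return false;
	return child->stack[root->depth] == root->id;
}

int main(void)
{
	struct demo_id *r = new_id(1, NULL);
	struct demo_id *a = new_id(2, r);
	struct demo_id *b = new_id(3, a);
	struct demo_id *c = new_id(4, r);

	/* prints "1 1 0": r and a are ancestors of b, c is not */
	printf("%d %d %d\n", is_ancestor(b, r), is_ancestor(b, a),
	       is_ancestor(b, c));
	return 0;
}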
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
index daca6209202d..0c92d797baa6 100644
--- a/kernel/cgroup_debug.c
+++ b/kernel/cgroup_debug.c
@@ -40,9 +40,7 @@ static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
 {
 	u64 count;
 
-	cgroup_lock();
 	count = cgroup_task_count(cont);
-	cgroup_unlock();
 	return count;
 }
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 79e40f00dcb8..395b6974dc8d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -281,7 +281,7 @@ int __ref cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	cpu_clear(cpu, cpu_active_map);
+	set_cpu_active(cpu, false);
 
 	/*
 	 * Make sure the all cpus did the reschedule and are not
@@ -296,7 +296,7 @@ int __ref cpu_down(unsigned int cpu)
 	err = _cpu_down(cpu, 0);
 
 	if (cpu_online(cpu))
-		cpu_set(cpu, cpu_active_map);
+		set_cpu_active(cpu, true);
 
 out:
 	cpu_maps_update_done();
@@ -333,7 +333,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
-	cpu_set(cpu, cpu_active_map);
+	set_cpu_active(cpu, true);
 
 	/* Now call notifier in preparation. */
 	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
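
All three hunks funnel what used to be open-coded cpu_set()/cpu_clear() on the global cpu_active_map through a single set_cpu_active() accessor, so the map itself need not stay writable everywhere. A hedged sketch of what such an accessor looks like; the bitmap helpers below stand in for the kernel's struct cpumask operations and are not the patch's own code:

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

#define NR_CPUS		64
#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* Stand-in for the kernel's cpu_active_map bitmap. */
static unsigned long cpu_active_bits[(NR_CPUS + BITS_PER_LONG - 1) / BITS_PER_LONG];

static void cpumask_assign(unsigned int cpu, bool set)
{
	unsigned long bit = 1UL << (cpu % BITS_PER_LONG);

	if (set)
		cpu_active_bits[cpu / BITS_PER_LONG] |= bit;
	else
		cpu_active_bits[cpu / BITS_PER_LONG] &= ~bit;
}

/* One writer entry point instead of ad-hoc cpu_set()/cpu_clear()
 * calls scattered across the hotplug paths. */
static void set_cpu_active(unsigned int cpu, bool active)
{
	cpumask_assign(cpu, active);
}

int main(void)
{
	set_cpu_active(3, true);
	set_cpu_active(3, false);
	printf("%lx\n", cpu_active_bits[0]);	/* back to 0 */
	return 0;
}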
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f76db9dcaa05..026faccca869 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -128,10 +128,6 @@ static inline struct cpuset *task_cs(struct task_struct *task)
 	return container_of(task_subsys_state(task, cpuset_subsys_id),
 			    struct cpuset, css);
 }
-struct cpuset_hotplug_scanner {
-	struct cgroup_scanner scan;
-	struct cgroup *to;
-};
 
 /* bits in struct cpuset flags field */
 typedef enum {
@@ -521,6 +517,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 	return 0;
 }
 
+#ifdef CONFIG_SMP
 /*
  * Helper routine for generate_sched_domains().
  * Do cpusets a, b have overlapping cpus_allowed masks?
@@ -815,6 +812,18 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
 
 	put_online_cpus();
 }
+#else /* !CONFIG_SMP */
+static void do_rebuild_sched_domains(struct work_struct *unused)
+{
+}
+
+static int generate_sched_domains(struct cpumask **domains,
+				  struct sched_domain_attr **attributes)
+{
+	*domains = NULL;
+	return 1;
+}
+#endif /* CONFIG_SMP */
 
 static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
820 829
@@ -1026,101 +1035,70 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1026 mutex_unlock(&callback_mutex); 1035 mutex_unlock(&callback_mutex);
1027} 1036}
1028 1037
1038/*
1039 * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new
1040 * nodes if memory_migrate flag is set. Called with cgroup_mutex held.
1041 */
1042static void cpuset_change_nodemask(struct task_struct *p,
1043 struct cgroup_scanner *scan)
1044{
1045 struct mm_struct *mm;
1046 struct cpuset *cs;
1047 int migrate;
1048 const nodemask_t *oldmem = scan->data;
1049
1050 mm = get_task_mm(p);
1051 if (!mm)
1052 return;
1053
1054 cs = cgroup_cs(scan->cg);
1055 migrate = is_memory_migrate(cs);
1056
1057 mpol_rebind_mm(mm, &cs->mems_allowed);
1058 if (migrate)
1059 cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1060 mmput(mm);
1061}
1062
1029static void *cpuset_being_rebound; 1063static void *cpuset_being_rebound;
1030 1064
1031/** 1065/**
1032 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. 1066 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
1033 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed 1067 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
1034 * @oldmem: old mems_allowed of cpuset cs 1068 * @oldmem: old mems_allowed of cpuset cs
1069 * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
1035 * 1070 *
1036 * Called with cgroup_mutex held 1071 * Called with cgroup_mutex held
1037 * Return 0 if successful, -errno if not. 1072 * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
1073 * if @heap != NULL.
1038 */ 1074 */
1039static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem) 1075static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem,
1076 struct ptr_heap *heap)
1040{ 1077{
1041 struct task_struct *p; 1078 struct cgroup_scanner scan;
1042 struct mm_struct **mmarray;
1043 int i, n, ntasks;
1044 int migrate;
1045 int fudge;
1046 struct cgroup_iter it;
1047 int retval;
1048 1079
1049 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ 1080 cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
1050 1081
1051 fudge = 10; /* spare mmarray[] slots */ 1082 scan.cg = cs->css.cgroup;
1052 fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */ 1083 scan.test_task = NULL;
1053 retval = -ENOMEM; 1084 scan.process_task = cpuset_change_nodemask;
1054 1085 scan.heap = heap;
1055 /* 1086 scan.data = (nodemask_t *)oldmem;
1056 * Allocate mmarray[] to hold mm reference for each task
1057 * in cpuset cs. Can't kmalloc GFP_KERNEL while holding
1058 * tasklist_lock. We could use GFP_ATOMIC, but with a
1059 * few more lines of code, we can retry until we get a big
1060 * enough mmarray[] w/o using GFP_ATOMIC.
1061 */
1062 while (1) {
1063 ntasks = cgroup_task_count(cs->css.cgroup); /* guess */
1064 ntasks += fudge;
1065 mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
1066 if (!mmarray)
1067 goto done;
1068 read_lock(&tasklist_lock); /* block fork */
1069 if (cgroup_task_count(cs->css.cgroup) <= ntasks)
1070 break; /* got enough */
1071 read_unlock(&tasklist_lock); /* try again */
1072 kfree(mmarray);
1073 }
1074
1075 n = 0;
1076
1077 /* Load up mmarray[] with mm reference for each task in cpuset. */
1078 cgroup_iter_start(cs->css.cgroup, &it);
1079 while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
1080 struct mm_struct *mm;
1081
1082 if (n >= ntasks) {
1083 printk(KERN_WARNING
1084 "Cpuset mempolicy rebind incomplete.\n");
1085 break;
1086 }
1087 mm = get_task_mm(p);
1088 if (!mm)
1089 continue;
1090 mmarray[n++] = mm;
1091 }
1092 cgroup_iter_end(cs->css.cgroup, &it);
1093 read_unlock(&tasklist_lock);
1094 1087
1095 /* 1088 /*
1096 * Now that we've dropped the tasklist spinlock, we can 1089 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
1097 * rebind the vma mempolicies of each mm in mmarray[] to their 1090 * take while holding tasklist_lock. Forks can happen - the
1098 * new cpuset, and release that mm. The mpol_rebind_mm() 1091 * mpol_dup() cpuset_being_rebound check will catch such forks,
1099 * call takes mmap_sem, which we couldn't take while holding 1092 * and rebind their vma mempolicies too. Because we still hold
1100 * tasklist_lock. Forks can happen again now - the mpol_dup() 1093 * the global cgroup_mutex, we know that no other rebind effort
1101 * cpuset_being_rebound check will catch such forks, and rebind 1094 * will be contending for the global variable cpuset_being_rebound.
1102 * their vma mempolicies too. Because we still hold the global
1103 * cgroup_mutex, we know that no other rebind effort will
1104 * be contending for the global variable cpuset_being_rebound.
1105 * It's ok if we rebind the same mm twice; mpol_rebind_mm() 1095 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1106 * is idempotent. Also migrate pages in each mm to new nodes. 1096 * is idempotent. Also migrate pages in each mm to new nodes.
1107 */ 1097 */
1108 migrate = is_memory_migrate(cs); 1098 cgroup_scan_tasks(&scan);
1109 for (i = 0; i < n; i++) {
1110 struct mm_struct *mm = mmarray[i];
1111
1112 mpol_rebind_mm(mm, &cs->mems_allowed);
1113 if (migrate)
1114 cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed);
1115 mmput(mm);
1116 }
1117 1099
1118 /* We're done rebinding vmas to this cpuset's new mems_allowed. */ 1100 /* We're done rebinding vmas to this cpuset's new mems_allowed. */
1119 kfree(mmarray);
1120 cpuset_being_rebound = NULL; 1101 cpuset_being_rebound = NULL;
1121 retval = 0;
1122done:
1123 return retval;
1124} 1102}
1125 1103
1126/* 1104/*
@@ -1141,6 +1119,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 {
 	nodemask_t oldmem;
 	int retval;
+	struct ptr_heap heap;
 
 	/*
 	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
@@ -1175,12 +1154,18 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		goto done;
 
+	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
+	if (retval < 0)
+		goto done;
+
 	mutex_lock(&callback_mutex);
 	cs->mems_allowed = trialcs->mems_allowed;
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	retval = update_tasks_nodemask(cs, &oldmem);
+	update_tasks_nodemask(cs, &oldmem, &heap);
+
+	heap_free(&heap);
 done:
 	return retval;
 }
@@ -1192,8 +1177,10 @@ int current_cpuset_is_being_rebound(void)
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
 {
+#ifdef CONFIG_SMP
 	if (val < -1 || val >= SD_LV_MAX)
 		return -EINVAL;
+#endif
 
 	if (val != cs->relax_domain_level) {
 		cs->relax_domain_level = val;
@@ -1355,19 +1342,22 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
 			 struct cgroup *cont, struct task_struct *tsk)
 {
 	struct cpuset *cs = cgroup_cs(cont);
-	int ret = 0;
 
 	if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	if (tsk->flags & PF_THREAD_BOUND) {
-		mutex_lock(&callback_mutex);
-		if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed))
-			ret = -EINVAL;
-		mutex_unlock(&callback_mutex);
-	}
+	/*
+	 * Kthreads bound to specific cpus cannot be moved to a new cpuset; we
+	 * cannot change their cpu affinity and isolating such threads by their
+	 * set of allowed nodes is unnecessary. Thus, cpusets are not
+	 * applicable for such threads. This prevents checking for success of
+	 * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may
+	 * be changed.
+	 */
+	if (tsk->flags & PF_THREAD_BOUND)
+		return -EINVAL;
 
-	return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL);
+	return security_task_setscheduler(tsk, 0, NULL);
 }
 
 static void cpuset_attach(struct cgroup_subsys *ss,
@@ -1706,6 +1696,7 @@ static struct cftype files[] = {
 		.read_u64 = cpuset_read_u64,
 		.write_u64 = cpuset_write_u64,
 		.private = FILE_MEMORY_PRESSURE,
+		.mode = S_IRUGO,
 	},
 
 	{
@@ -1913,10 +1904,9 @@ int __init cpuset_init(void)
 static void cpuset_do_move_task(struct task_struct *tsk,
 				struct cgroup_scanner *scan)
 {
-	struct cpuset_hotplug_scanner *chsp;
+	struct cgroup *new_cgroup = scan->data;
 
-	chsp = container_of(scan, struct cpuset_hotplug_scanner, scan);
-	cgroup_attach_task(chsp->to, tsk);
+	cgroup_attach_task(new_cgroup, tsk);
 }
 
1922/** 1912/**
@@ -1932,15 +1922,15 @@ static void cpuset_do_move_task(struct task_struct *tsk,
1932 */ 1922 */
1933static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) 1923static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to)
1934{ 1924{
1935 struct cpuset_hotplug_scanner scan; 1925 struct cgroup_scanner scan;
1936 1926
1937 scan.scan.cg = from->css.cgroup; 1927 scan.cg = from->css.cgroup;
1938 scan.scan.test_task = NULL; /* select all tasks in cgroup */ 1928 scan.test_task = NULL; /* select all tasks in cgroup */
1939 scan.scan.process_task = cpuset_do_move_task; 1929 scan.process_task = cpuset_do_move_task;
1940 scan.scan.heap = NULL; 1930 scan.heap = NULL;
1941 scan.to = to->css.cgroup; 1931 scan.data = to->css.cgroup;
1942 1932
1943 if (cgroup_scan_tasks(&scan.scan)) 1933 if (cgroup_scan_tasks(&scan))
1944 printk(KERN_ERR "move_member_tasks_to_cpuset: " 1934 printk(KERN_ERR "move_member_tasks_to_cpuset: "
1945 "cgroup_scan_tasks failed\n"); 1935 "cgroup_scan_tasks failed\n");
1946} 1936}
@@ -2033,7 +2023,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 			remove_tasks_in_empty_cpuset(cp);
 		else {
 			update_tasks_cpumask(cp, NULL);
-			update_tasks_nodemask(cp, &oldmems);
+			update_tasks_nodemask(cp, &oldmems, NULL);
 		}
 	}
 }
@@ -2069,7 +2059,9 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
 	}
 
 	cgroup_lock();
+	mutex_lock(&callback_mutex);
 	cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask);
+	mutex_unlock(&callback_mutex);
 	scan_for_empty_cpusets(&top_cpuset);
 	ndoms = generate_sched_domains(&doms, &attr);
 	cgroup_unlock();
@@ -2092,11 +2084,12 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 	cgroup_lock();
 	switch (action) {
 	case MEM_ONLINE:
-		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
-		break;
 	case MEM_OFFLINE:
+		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
-		scan_for_empty_cpusets(&top_cpuset);
+		mutex_unlock(&callback_mutex);
+		if (action == MEM_OFFLINE)
+			scan_for_empty_cpusets(&top_cpuset);
 		break;
 	default:
 		break;
@@ -2206,26 +2199,24 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 }
 
 /**
- * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_softwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate. If
- * __GFP_THISNODE is set, yes, we can always allocate. If zone
- * z's node is in our tasks mems_allowed, yes. If it's not a
- * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * hardwalled cpuset ancestor to this tasks cpuset, yes.
- * If the task has been OOM killed and has access to memory reserves
- * as specified by the TIF_MEMDIE flag, yes.
+ * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
+ * set, yes, we can always allocate. If node is in our task's mems_allowed,
+ * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest
+ * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been
+ * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE
+ * flag, yes.
  * Otherwise, no.
  *
- * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall()
- * reduces to cpuset_zone_allowed_hardwall(). Otherwise,
- * cpuset_zone_allowed_softwall() might sleep, and might allow a zone
- * from an enclosing cpuset.
+ * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
+ * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
+ * might sleep, and might allow a node from an enclosing cpuset.
  *
- * cpuset_zone_allowed_hardwall() only handles the simpler case of
- * hardwall cpusets, and never sleeps.
+ * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
+ * cpusets, and never sleeps.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2264,20 +2255,17 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
  * GFP_USER - only nodes in current tasks mems allowed ok.
  *
  * Rule:
- * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you
+ * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
  * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
  * the code that might scan up ancestor cpusets and sleep.
  */
-
-int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
 	const struct cpuset *cs;	/* current cpuset ancestors */
 	int allowed;			/* is allocation in zone z allowed? */
 
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
 	if (node_isset(node, current->mems_allowed))
 		return 1;
@@ -2306,15 +2294,15 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
 }
 
 /*
- * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node?
- * @z: is this zone on an allowed node?
+ * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
+ * @node: is this an allowed node?
  * @gfp_mask: memory allocation flags
  *
- * If we're in interrupt, yes, we can always allocate.
- * If __GFP_THISNODE is set, yes, we can always allocate. If zone
- * z's node is in our tasks mems_allowed, yes. If the task has been
- * OOM killed and has access to memory reserves as specified by the
- * TIF_MEMDIE flag, yes. Otherwise, no.
+ * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
+ * set, yes, we can always allocate. If node is in our task's mems_allowed,
+ * yes. If the task has been OOM killed and has access to memory reserves as
+ * specified by the TIF_MEMDIE flag, yes.
+ * Otherwise, no.
  *
  * The __GFP_THISNODE placement logic is really handled elsewhere,
  * by forcibly using a zonelist starting at a specified node, and by
@@ -2322,20 +2310,16 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
  * any node on the zonelist except the first. By the time any such
  * calls get to this routine, we should just shut up and say 'yes'.
  *
- * Unlike the cpuset_zone_allowed_softwall() variant, above,
- * this variant requires that the zone be in the current tasks
+ * Unlike the cpuset_node_allowed_softwall() variant, above,
+ * this variant requires that the node be in the current task's
  * mems_allowed or that we're in interrupt. It does not scan up the
  * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
  * It never sleeps.
  */
-
-int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
+int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
 {
-	int node;			/* node that zone z is on */
-
 	if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
 		return 1;
-	node = zone_to_nid(z);
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 	/*
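
The renamed predicates take a NUMA node instead of a zone: the zone_to_nid() conversion the old functions performed internally now happens at call sites that actually start from a zone, and node-based callers skip it entirely. Below is a self-contained sketch of the hardwall decision with toy types; in_interrupt() and the GFP bits are modelled as plain booleans, and only the node_isset() test reflects the real logic:

#include <stdbool.h>
#include <stdio.h>

/* Toy nodemask: one bit per NUMA node, like the kernel's nodemask_t. */
typedef struct { unsigned long bits; } nodemask_t;

static bool node_isset(int node, nodemask_t mask)
{
	return mask.bits & (1UL << node);
}

/* Sketch of __cpuset_node_allowed_hardwall() after the change: the
 * caller supplies the node directly instead of a zone. */
static bool node_allowed_hardwall(int node, bool in_interrupt,
				  bool gfp_thisnode, nodemask_t mems_allowed)
{
	if (in_interrupt || gfp_thisnode)
		return true;
	return node_isset(node, mems_allowed);
}

int main(void)
{
	nodemask_t mems = { .bits = 0x3 };	/* nodes 0 and 1 allowed */

	/* prints "1 0 1" */
	printf("%d %d %d\n",
	       node_allowed_hardwall(1, false, false, mems),
	       node_allowed_hardwall(2, false, false, mems),
	       node_allowed_hardwall(2, true, false, mems));
	return 0;
}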
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 667c841c2952..c35452cadded 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -18,6 +18,7 @@
 #include <linux/syscalls.h>
 #include <linux/sysctl.h>
 #include <linux/types.h>
+#include <linux/fs_struct.h>
 
 
 static void default_handler(int, struct pt_regs *);
@@ -145,28 +146,6 @@ __set_personality(u_long personality)
 		return 0;
 	}
 
-	if (atomic_read(&current->fs->count) != 1) {
-		struct fs_struct *fsp, *ofsp;
-
-		fsp = copy_fs_struct(current->fs);
-		if (fsp == NULL) {
-			module_put(ep->module);
-			return -ENOMEM;
-		}
-
-		task_lock(current);
-		ofsp = current->fs;
-		current->fs = fsp;
-		task_unlock(current);
-
-		put_fs_struct(ofsp);
-	}
-
-	/*
-	 * At that point we are guaranteed to be the sole owner of
-	 * current->fs.
-	 */
-
 	current->personality = personality;
 	oep = current_thread_info()->exec_domain;
 	current_thread_info()->exec_domain = ep;
diff --git a/kernel/exit.c b/kernel/exit.c
index efd30ccf3858..6686ed1e4aa3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -46,6 +46,7 @@
 #include <linux/blkdev.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/tracehook.h>
+#include <linux/fs_struct.h>
 #include <linux/init_task.h>
 #include <trace/sched.h>
 
@@ -61,11 +62,6 @@ DEFINE_TRACE(sched_process_wait);
 
 static void exit_mm(struct task_struct * tsk);
 
-static inline int task_detached(struct task_struct *p)
-{
-	return p->exit_signal == -1;
-}
-
 static void __unhash_process(struct task_struct *p)
 {
 	nr_threads--;
@@ -362,16 +358,12 @@ static void reparent_to_kthreadd(void)
 void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
-	pid_t nr = pid_nr(pid);
 
-	if (task_session(curr) != pid) {
+	if (task_session(curr) != pid)
 		change_pid(curr, PIDTYPE_SID, pid);
-		set_task_session(curr, nr);
-	}
-	if (task_pgrp(curr) != pid) {
+
+	if (task_pgrp(curr) != pid)
 		change_pid(curr, PIDTYPE_PGID, pid);
-		set_task_pgrp(curr, nr);
-	}
 }
 
 static void set_special_pids(struct pid *pid)
@@ -429,7 +421,6 @@ EXPORT_SYMBOL(disallow_signal);
 void daemonize(const char *name, ...)
 {
 	va_list args;
-	struct fs_struct *fs;
 	sigset_t blocked;
 
 	va_start(args, name);
@@ -462,11 +453,7 @@ void daemonize(const char *name, ...)
 
 	/* Become as one with the init task */
 
-	exit_fs(current);	/* current->fs->count--; */
-	fs = init_task.fs;
-	current->fs = fs;
-	atomic_inc(&fs->count);
-
+	daemonize_fs_struct();
 	exit_files(current);
 	current->files = init_task.files;
 	atomic_inc(&current->files->count);
@@ -565,30 +552,6 @@ void exit_files(struct task_struct *tsk)
 	}
 }
 
-void put_fs_struct(struct fs_struct *fs)
-{
-	/* No need to hold fs->lock if we are killing it */
-	if (atomic_dec_and_test(&fs->count)) {
-		path_put(&fs->root);
-		path_put(&fs->pwd);
-		kmem_cache_free(fs_cachep, fs);
-	}
-}
-
-void exit_fs(struct task_struct *tsk)
-{
-	struct fs_struct * fs = tsk->fs;
-
-	if (fs) {
-		task_lock(tsk);
-		tsk->fs = NULL;
-		task_unlock(tsk);
-		put_fs_struct(fs);
-	}
-}
-
-EXPORT_SYMBOL_GPL(exit_fs);
-
 #ifdef CONFIG_MM_OWNER
 /*
  * Task p is exiting and it owned mm, lets find a new owner for it
@@ -732,119 +695,6 @@ static void exit_mm(struct task_struct * tsk)
732} 695}
733 696
734/* 697/*
735 * Return nonzero if @parent's children should reap themselves.
736 *
737 * Called with write_lock_irq(&tasklist_lock) held.
738 */
739static int ignoring_children(struct task_struct *parent)
740{
741 int ret;
742 struct sighand_struct *psig = parent->sighand;
743 unsigned long flags;
744 spin_lock_irqsave(&psig->siglock, flags);
745 ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
746 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
747 spin_unlock_irqrestore(&psig->siglock, flags);
748 return ret;
749}
750
751/*
752 * Detach all tasks we were using ptrace on.
753 * Any that need to be release_task'd are put on the @dead list.
754 *
755 * Called with write_lock(&tasklist_lock) held.
756 */
757static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
758{
759 struct task_struct *p, *n;
760 int ign = -1;
761
762 list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
763 __ptrace_unlink(p);
764
765 if (p->exit_state != EXIT_ZOMBIE)
766 continue;
767
768 /*
769 * If it's a zombie, our attachedness prevented normal
770 * parent notification or self-reaping. Do notification
771 * now if it would have happened earlier. If it should
772 * reap itself, add it to the @dead list. We can't call
773 * release_task() here because we already hold tasklist_lock.
774 *
775 * If it's our own child, there is no notification to do.
776 * But if our normal children self-reap, then this child
777 * was prevented by ptrace and we must reap it now.
778 */
779 if (!task_detached(p) && thread_group_empty(p)) {
780 if (!same_thread_group(p->real_parent, parent))
781 do_notify_parent(p, p->exit_signal);
782 else {
783 if (ign < 0)
784 ign = ignoring_children(parent);
785 if (ign)
786 p->exit_signal = -1;
787 }
788 }
789
790 if (task_detached(p)) {
791 /*
792 * Mark it as in the process of being reaped.
793 */
794 p->exit_state = EXIT_DEAD;
795 list_add(&p->ptrace_entry, dead);
796 }
797 }
798}
799
800/*
801 * Finish up exit-time ptrace cleanup.
802 *
803 * Called without locks.
804 */
805static void ptrace_exit_finish(struct task_struct *parent,
806 struct list_head *dead)
807{
808 struct task_struct *p, *n;
809
810 BUG_ON(!list_empty(&parent->ptraced));
811
812 list_for_each_entry_safe(p, n, dead, ptrace_entry) {
813 list_del_init(&p->ptrace_entry);
814 release_task(p);
815 }
816}
817
818static void reparent_thread(struct task_struct *p, struct task_struct *father)
819{
820 if (p->pdeath_signal)
821 /* We already hold the tasklist_lock here. */
822 group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
823
824 list_move_tail(&p->sibling, &p->real_parent->children);
825
826 /* If this is a threaded reparent there is no need to
827 * notify anyone anything has happened.
828 */
829 if (same_thread_group(p->real_parent, father))
830 return;
831
832 /* We don't want people slaying init. */
833 if (!task_detached(p))
834 p->exit_signal = SIGCHLD;
835
836 /* If we'd notified the old parent about this child's death,
837 * also notify the new parent.
838 */
839 if (!ptrace_reparented(p) &&
840 p->exit_state == EXIT_ZOMBIE &&
841 !task_detached(p) && thread_group_empty(p))
842 do_notify_parent(p, p->exit_signal);
843
844 kill_orphaned_pgrp(p, father);
845}
846
847/*
848 * When we die, we re-parent all our children. 698 * When we die, we re-parent all our children.
849 * Try to give them to another thread in our thread 699 * Try to give them to another thread in our thread
850 * group, and if no such member exists, give it to 700 * group, and if no such member exists, give it to
@@ -883,17 +733,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
883 return pid_ns->child_reaper; 733 return pid_ns->child_reaper;
884} 734}
885 735
736/*
737 * Any that need to be release_task'd are put on the @dead list.
738 */
739static void reparent_thread(struct task_struct *father, struct task_struct *p,
740 struct list_head *dead)
741{
742 if (p->pdeath_signal)
743 group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
744
745 list_move_tail(&p->sibling, &p->real_parent->children);
746
747 if (task_detached(p))
748 return;
749 /*
750 * If this is a threaded reparent there is no need to
751 * notify anyone anything has happened.
752 */
753 if (same_thread_group(p->real_parent, father))
754 return;
755
756 /* We don't want people slaying init. */
757 p->exit_signal = SIGCHLD;
758
759 /* If it has exited notify the new parent about this child's death. */
760 if (!p->ptrace &&
761 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
762 do_notify_parent(p, p->exit_signal);
763 if (task_detached(p)) {
764 p->exit_state = EXIT_DEAD;
765 list_move_tail(&p->sibling, dead);
766 }
767 }
768
769 kill_orphaned_pgrp(p, father);
770}
771
886static void forget_original_parent(struct task_struct *father) 772static void forget_original_parent(struct task_struct *father)
887{ 773{
888 struct task_struct *p, *n, *reaper; 774 struct task_struct *p, *n, *reaper;
889 LIST_HEAD(ptrace_dead); 775 LIST_HEAD(dead_children);
776
777 exit_ptrace(father);
890 778
891 write_lock_irq(&tasklist_lock); 779 write_lock_irq(&tasklist_lock);
892 reaper = find_new_reaper(father); 780 reaper = find_new_reaper(father);
893 /*
894 * First clean up ptrace if we were using it.
895 */
896 ptrace_exit(father, &ptrace_dead);
897 781
898 list_for_each_entry_safe(p, n, &father->children, sibling) { 782 list_for_each_entry_safe(p, n, &father->children, sibling) {
899 p->real_parent = reaper; 783 p->real_parent = reaper;
@@ -901,13 +785,16 @@ static void forget_original_parent(struct task_struct *father)
901 BUG_ON(p->ptrace); 785 BUG_ON(p->ptrace);
902 p->parent = p->real_parent; 786 p->parent = p->real_parent;
903 } 787 }
904 reparent_thread(p, father); 788 reparent_thread(father, p, &dead_children);
905 } 789 }
906
907 write_unlock_irq(&tasklist_lock); 790 write_unlock_irq(&tasklist_lock);
791
908 BUG_ON(!list_empty(&father->children)); 792 BUG_ON(!list_empty(&father->children));
909 793
910 ptrace_exit_finish(father, &ptrace_dead); 794 list_for_each_entry_safe(p, n, &dead_children, sibling) {
795 list_del_init(&p->sibling);
796 release_task(p);
797 }
911} 798}
912 799
913/* 800/*
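(Aside: the removed ignoring_children() helper tested whether SIGCHLD is ignored or
SA_NOCLDWAIT is set, in which case exiting children self-reap instead of lingering as
zombies; that logic now sits behind the exit_ptrace() call added above. The behaviour is
easy to observe from user space; a small runnable demo, assuming Linux/glibc:)

	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		signal(SIGCHLD, SIG_IGN);	/* children self-reap: no zombies */
		if (fork() == 0)
			_exit(0);
		sleep(1);			/* child has already been reaped */
		printf("wait() = %d\n", (int)wait(NULL));	/* -1 (ECHILD) */
		return 0;
	}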
@@ -980,12 +867,9 @@ static void check_stack_usage(void)
980{ 867{
981 static DEFINE_SPINLOCK(low_water_lock); 868 static DEFINE_SPINLOCK(low_water_lock);
982 static int lowest_to_date = THREAD_SIZE; 869 static int lowest_to_date = THREAD_SIZE;
983 unsigned long *n = end_of_stack(current);
984 unsigned long free; 870 unsigned long free;
985 871
986 while (*n == 0) 872 free = stack_not_used(current);
987 n++;
988 free = (unsigned long)n - (unsigned long)end_of_stack(current);
989 873
990 if (free >= lowest_to_date) 874 if (free >= lowest_to_date)
991 return; 875 return;
@@ -1420,6 +1304,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
1420 return retval; 1304 return retval;
1421} 1305}
1422 1306
1307static int *task_stopped_code(struct task_struct *p, bool ptrace)
1308{
1309 if (ptrace) {
1310 if (task_is_stopped_or_traced(p))
1311 return &p->exit_code;
1312 } else {
1313 if (p->signal->flags & SIGNAL_STOP_STOPPED)
1314 return &p->signal->group_exit_code;
1315 }
1316 return NULL;
1317}
1318
1423/* 1319/*
1424 * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold 1320 * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold
1425 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold 1321 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
@@ -1430,7 +1326,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
1430 int options, struct siginfo __user *infop, 1326 int options, struct siginfo __user *infop,
1431 int __user *stat_addr, struct rusage __user *ru) 1327 int __user *stat_addr, struct rusage __user *ru)
1432{ 1328{
1433 int retval, exit_code, why; 1329 int retval, exit_code, *p_code, why;
1434 uid_t uid = 0; /* unneeded, required by compiler */ 1330 uid_t uid = 0; /* unneeded, required by compiler */
1435 pid_t pid; 1331 pid_t pid;
1436 1332
@@ -1440,22 +1336,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
1440 exit_code = 0; 1336 exit_code = 0;
1441 spin_lock_irq(&p->sighand->siglock); 1337 spin_lock_irq(&p->sighand->siglock);
1442 1338
1443 if (unlikely(!task_is_stopped_or_traced(p))) 1339 p_code = task_stopped_code(p, ptrace);
1444 goto unlock_sig; 1340 if (unlikely(!p_code))
1445
1446 if (!ptrace && p->signal->group_stop_count > 0)
1447 /*
1448 * A group stop is in progress and this is the group leader.
1449 * We won't report until all threads have stopped.
1450 */
1451 goto unlock_sig; 1341 goto unlock_sig;
1452 1342
1453 exit_code = p->exit_code; 1343 exit_code = *p_code;
1454 if (!exit_code) 1344 if (!exit_code)
1455 goto unlock_sig; 1345 goto unlock_sig;
1456 1346
1457 if (!unlikely(options & WNOWAIT)) 1347 if (!unlikely(options & WNOWAIT))
1458 p->exit_code = 0; 1348 *p_code = 0;
1459 1349
1460 /* don't need the RCU readlock here as we're holding a spinlock */ 1350 /* don't need the RCU readlock here as we're holding a spinlock */
1461 uid = __task_cred(p)->uid; 1351 uid = __task_cred(p)->uid;
@@ -1611,7 +1501,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
1611 */ 1501 */
1612 *notask_error = 0; 1502 *notask_error = 0;
1613 1503
1614 if (task_is_stopped_or_traced(p)) 1504 if (task_stopped_code(p, ptrace))
1615 return wait_task_stopped(ptrace, p, options, 1505 return wait_task_stopped(ptrace, p, options,
1616 infop, stat_addr, ru); 1506 infop, stat_addr, ru);
1617 1507
@@ -1815,7 +1705,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1815 pid = find_get_pid(-upid); 1705 pid = find_get_pid(-upid);
1816 } else if (upid == 0) { 1706 } else if (upid == 0) {
1817 type = PIDTYPE_PGID; 1707 type = PIDTYPE_PGID;
1818 pid = get_pid(task_pgrp(current)); 1708 pid = get_task_pid(current, PIDTYPE_PGID);
1819 } else /* upid > 0 */ { 1709 } else /* upid > 0 */ {
1820 type = PIDTYPE_PID; 1710 type = PIDTYPE_PID;
1821 pid = find_get_pid(upid); 1711 pid = find_get_pid(upid);
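(Aside: task_stopped_code() folds the two "is it stopped?" tests into one place --
p->exit_code when ptracing, signal->group_exit_code under SIGNAL_STOP_STOPPED otherwise.
From user space the non-ptrace path is what WUNTRACED reports; a quick runnable check:)

	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	int main(void)
	{
		pid_t pid = fork();
		int status;

		if (pid == 0) {
			raise(SIGSTOP);		/* stop ourselves */
			_exit(0);
		}
		/* WUNTRACED makes wait_task_stopped() report the stop. */
		waitpid(pid, &status, WUNTRACED);
		if (WIFSTOPPED(status))
			printf("child stopped by signal %d\n", WSTOPSIG(status));
		kill(pid, SIGCONT);
		waitpid(pid, &status, 0);	/* reap the real exit */
		return 0;
	}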
diff --git a/kernel/fork.c b/kernel/fork.c
index 4854c2c4a82e..660c2b8765bc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,7 +60,9 @@
60#include <linux/tty.h> 60#include <linux/tty.h>
61#include <linux/proc_fs.h> 61#include <linux/proc_fs.h>
62#include <linux/blkdev.h> 62#include <linux/blkdev.h>
63#include <linux/fs_struct.h>
63#include <trace/sched.h> 64#include <trace/sched.h>
65#include <linux/magic.h>
64 66
65#include <asm/pgtable.h> 67#include <asm/pgtable.h>
66#include <asm/pgalloc.h> 68#include <asm/pgalloc.h>
@@ -212,6 +214,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
212{ 214{
213 struct task_struct *tsk; 215 struct task_struct *tsk;
214 struct thread_info *ti; 216 struct thread_info *ti;
217 unsigned long *stackend;
218
215 int err; 219 int err;
216 220
217 prepare_to_copy(orig); 221 prepare_to_copy(orig);
@@ -237,6 +241,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
237 goto out; 241 goto out;
238 242
239 setup_thread_stack(tsk, orig); 243 setup_thread_stack(tsk, orig);
244 stackend = end_of_stack(tsk);
245 *stackend = STACK_END_MAGIC; /* for overflow detection */
240 246
241#ifdef CONFIG_CC_STACKPROTECTOR 247#ifdef CONFIG_CC_STACKPROTECTOR
242 tsk->stack_canary = get_random_int(); 248 tsk->stack_canary = get_random_int();
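(Aside: dup_task_struct() now plants STACK_END_MAGIC -- 0x57AC6E9D, via the newly included
linux/magic.h -- at the far end of each thread stack, which is what stack_not_used() walks
over in check_stack_usage() later in this series. A toy user-space analogue of the
sentinel idea; stk and WORDS are made-up names:)

	#include <stdio.h>
	#include <stdlib.h>

	#define STACK_END_MAGIC 0x57AC6E9DUL	/* same value the kernel uses */

	int main(void)
	{
		enum { WORDS = 2048 };
		unsigned long *stk = calloc(WORDS, sizeof(*stk));

		stk[0] = STACK_END_MAGIC;	/* sentinel at the lowest address */
		/* ... pretend the WORDS-1 words above it are the real stack ... */
		if (stk[0] != STACK_END_MAGIC)
			fprintf(stderr, "stack overflow detected\n");
		else
			puts("sentinel intact");
		free(stk);
		return 0;
	}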
@@ -279,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
279 mm->free_area_cache = oldmm->mmap_base; 285 mm->free_area_cache = oldmm->mmap_base;
280 mm->cached_hole_size = ~0UL; 286 mm->cached_hole_size = ~0UL;
281 mm->map_count = 0; 287 mm->map_count = 0;
282 cpus_clear(mm->cpu_vm_mask); 288 cpumask_clear(mm_cpumask(mm));
283 mm->mm_rb = RB_ROOT; 289 mm->mm_rb = RB_ROOT;
284 rb_link = &mm->mm_rb.rb_node; 290 rb_link = &mm->mm_rb.rb_node;
285 rb_parent = NULL; 291 rb_parent = NULL;
@@ -676,38 +682,21 @@ fail_nomem:
676 return retval; 682 return retval;
677} 683}
678 684
679static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
680{
681 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
682 /* We don't need to lock fs - think why ;-) */
683 if (fs) {
684 atomic_set(&fs->count, 1);
685 rwlock_init(&fs->lock);
686 fs->umask = old->umask;
687 read_lock(&old->lock);
688 fs->root = old->root;
689 path_get(&old->root);
690 fs->pwd = old->pwd;
691 path_get(&old->pwd);
692 read_unlock(&old->lock);
693 }
694 return fs;
695}
696
697struct fs_struct *copy_fs_struct(struct fs_struct *old)
698{
699 return __copy_fs_struct(old);
700}
701
702EXPORT_SYMBOL_GPL(copy_fs_struct);
703
704static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) 685static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
705{ 686{
687 struct fs_struct *fs = current->fs;
706 if (clone_flags & CLONE_FS) { 688 if (clone_flags & CLONE_FS) {
707 atomic_inc(&current->fs->count); 689 /* tsk->fs is already what we want */
690 write_lock(&fs->lock);
691 if (fs->in_exec) {
692 write_unlock(&fs->lock);
693 return -EAGAIN;
694 }
695 fs->users++;
696 write_unlock(&fs->lock);
708 return 0; 697 return 0;
709 } 698 }
710 tsk->fs = __copy_fs_struct(current->fs); 699 tsk->fs = copy_fs_struct(fs);
711 if (!tsk->fs) 700 if (!tsk->fs)
712 return -ENOMEM; 701 return -ENOMEM;
713 return 0; 702 return 0;
@@ -836,6 +825,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
836 atomic_set(&sig->live, 1); 825 atomic_set(&sig->live, 1);
837 init_waitqueue_head(&sig->wait_chldexit); 826 init_waitqueue_head(&sig->wait_chldexit);
838 sig->flags = 0; 827 sig->flags = 0;
828 if (clone_flags & CLONE_NEWPID)
829 sig->flags |= SIGNAL_UNKILLABLE;
839 sig->group_exit_code = 0; 830 sig->group_exit_code = 0;
840 sig->group_exit_task = NULL; 831 sig->group_exit_task = NULL;
841 sig->group_stop_count = 0; 832 sig->group_stop_count = 0;
@@ -1120,7 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1120 goto bad_fork_cleanup_mm; 1111 goto bad_fork_cleanup_mm;
1121 if ((retval = copy_io(clone_flags, p))) 1112 if ((retval = copy_io(clone_flags, p)))
1122 goto bad_fork_cleanup_namespaces; 1113 goto bad_fork_cleanup_namespaces;
1123 retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); 1114 retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
1124 if (retval) 1115 if (retval)
1125 goto bad_fork_cleanup_io; 1116 goto bad_fork_cleanup_io;
1126 1117
@@ -1258,8 +1249,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1258 p->signal->leader_pid = pid; 1249 p->signal->leader_pid = pid;
1259 tty_kref_put(p->signal->tty); 1250 tty_kref_put(p->signal->tty);
1260 p->signal->tty = tty_kref_get(current->signal->tty); 1251 p->signal->tty = tty_kref_get(current->signal->tty);
1261 set_task_pgrp(p, task_pgrp_nr(current));
1262 set_task_session(p, task_session_nr(current));
1263 attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); 1252 attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1264 attach_pid(p, PIDTYPE_SID, task_session(current)); 1253 attach_pid(p, PIDTYPE_SID, task_session(current));
1265 list_add_tail_rcu(&p->tasks, &init_task.tasks); 1254 list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -1483,6 +1472,7 @@ void __init proc_caches_init(void)
1483 mm_cachep = kmem_cache_create("mm_struct", 1472 mm_cachep = kmem_cache_create("mm_struct",
1484 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, 1473 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1485 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1474 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1475 vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1486 mmap_init(); 1476 mmap_init();
1487} 1477}
1488 1478
@@ -1538,12 +1528,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1538{ 1528{
1539 struct fs_struct *fs = current->fs; 1529 struct fs_struct *fs = current->fs;
1540 1530
1541 if ((unshare_flags & CLONE_FS) && 1531 if (!(unshare_flags & CLONE_FS) || !fs)
1542 (fs && atomic_read(&fs->count) > 1)) { 1532 return 0;
1543 *new_fsp = __copy_fs_struct(current->fs); 1533
1544 if (!*new_fsp) 1534 /* don't need lock here; in the worst case we'll do useless copy */
1545 return -ENOMEM; 1535 if (fs->users == 1)
1546 } 1536 return 0;
1537
1538 *new_fsp = copy_fs_struct(fs);
1539 if (!*new_fsp)
1540 return -ENOMEM;
1547 1541
1548 return 0; 1542 return 0;
1549} 1543}
@@ -1659,8 +1653,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1659 1653
1660 if (new_fs) { 1654 if (new_fs) {
1661 fs = current->fs; 1655 fs = current->fs;
1656 write_lock(&fs->lock);
1662 current->fs = new_fs; 1657 current->fs = new_fs;
1663 new_fs = fs; 1658 if (--fs->users)
1659 new_fs = NULL;
1660 else
1661 new_fs = fs;
1662 write_unlock(&fs->lock);
1664 } 1663 }
1665 1664
1666 if (new_mm) { 1665 if (new_mm) {
@@ -1699,7 +1698,7 @@ bad_unshare_cleanup_sigh:
1699 1698
1700bad_unshare_cleanup_fs: 1699bad_unshare_cleanup_fs:
1701 if (new_fs) 1700 if (new_fs)
1702 put_fs_struct(new_fs); 1701 free_fs_struct(new_fs);
1703 1702
1704bad_unshare_cleanup_thread: 1703bad_unshare_cleanup_thread:
1705bad_unshare_out: 1704bad_unshare_out:
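(Aside: unshare_fs() above now copies only when fs->users > 1; the user-visible entry
point for all of this is unshare(2). A minimal caller, assuming Linux and glibc:)

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[256];

		/* After this, chdir()/chroot()/umask() no longer affect anyone
		 * we shared an fs_struct with (e.g. CLONE_FS threads). */
		if (unshare(CLONE_FS) == -1) {
			perror("unshare(CLONE_FS)");
			return 1;
		}
		if (chdir("/tmp") == 0 && getcwd(buf, sizeof(buf)))
			printf("private cwd: %s\n", buf);
		return 0;
	}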
diff --git a/kernel/futex.c b/kernel/futex.c
index 438701adce23..6b50a024bca2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -114,7 +114,9 @@ struct futex_q {
114}; 114};
115 115
116/* 116/*
117 * Split the global futex_lock into every hash list lock. 117 * Hash buckets are shared by all the futex_keys that hash to the same
118 * location. Each key may have multiple futex_q structures, one for each task
119 * waiting on a futex.
118 */ 120 */
119struct futex_hash_bucket { 121struct futex_hash_bucket {
120 spinlock_t lock; 122 spinlock_t lock;
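(Aside: the reworked comment above describes the key -> hash bucket -> futex_q chain;
whether a key is private or shared is what the fshared flag threaded through this file
selects. From user space that choice is FUTEX_PRIVATE_FLAG; a tiny wake helper, assuming
Linux headers -- the helper name is made up:)

	#define _GNU_SOURCE
	#include <linux/futex.h>
	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Wake at most one waiter on a process-private futex word. Private
	 * keys let get_futex_key() use (uaddr, current->mm) and skip the
	 * page lookup entirely. */
	static long futex_wake_one(uint32_t *uaddr)
	{
		return syscall(SYS_futex, uaddr, FUTEX_WAKE | FUTEX_PRIVATE_FLAG,
			       1, NULL, NULL, 0);
	}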
@@ -189,8 +191,7 @@ static void drop_futex_key_refs(union futex_key *key)
189/** 191/**
190 * get_futex_key - Get parameters which are the keys for a futex. 192 * get_futex_key - Get parameters which are the keys for a futex.
191 * @uaddr: virtual address of the futex 193 * @uaddr: virtual address of the futex
192 * @shared: NULL for a PROCESS_PRIVATE futex, 194 * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
193 * &current->mm->mmap_sem for a PROCESS_SHARED futex
194 * @key: address where result is stored. 195 * @key: address where result is stored.
195 * 196 *
196 * Returns a negative error code or 0 197 * Returns a negative error code or 0
@@ -200,9 +201,7 @@ static void drop_futex_key_refs(union futex_key *key)
200 * offset_within_page). For private mappings, it's (uaddr, current->mm). 201 * offset_within_page). For private mappings, it's (uaddr, current->mm).
201 * We can usually work out the index without swapping in the page. 202 * We can usually work out the index without swapping in the page.
202 * 203 *
203 * fshared is NULL for PROCESS_PRIVATE futexes 204 * lock_page() might sleep, the caller should not hold a spinlock.
204 * For other futexes, it points to &current->mm->mmap_sem and
205 * caller must have taken the reader lock. but NOT any spinlocks.
206 */ 205 */
207static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key) 206static int get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
208{ 207{
@@ -299,41 +298,6 @@ static int get_futex_value_locked(u32 *dest, u32 __user *from)
299 return ret ? -EFAULT : 0; 298 return ret ? -EFAULT : 0;
300} 299}
301 300
302/*
303 * Fault handling.
304 */
305static int futex_handle_fault(unsigned long address, int attempt)
306{
307 struct vm_area_struct * vma;
308 struct mm_struct *mm = current->mm;
309 int ret = -EFAULT;
310
311 if (attempt > 2)
312 return ret;
313
314 down_read(&mm->mmap_sem);
315 vma = find_vma(mm, address);
316 if (vma && address >= vma->vm_start &&
317 (vma->vm_flags & VM_WRITE)) {
318 int fault;
319 fault = handle_mm_fault(mm, vma, address, 1);
320 if (unlikely((fault & VM_FAULT_ERROR))) {
321#if 0
322 /* XXX: let's do this when we verify it is OK */
323 if (ret & VM_FAULT_OOM)
324 ret = -ENOMEM;
325#endif
326 } else {
327 ret = 0;
328 if (fault & VM_FAULT_MAJOR)
329 current->maj_flt++;
330 else
331 current->min_flt++;
332 }
333 }
334 up_read(&mm->mmap_sem);
335 return ret;
336}
337 301
338/* 302/*
339 * PI code: 303 * PI code:
@@ -589,10 +553,9 @@ static void wake_futex(struct futex_q *q)
589 * The waiting task can free the futex_q as soon as this is written, 553 * The waiting task can free the futex_q as soon as this is written,
590 * without taking any locks. This must come last. 554 * without taking any locks. This must come last.
591 * 555 *
592 * A memory barrier is required here to prevent the following store 556 * A memory barrier is required here to prevent the following store to
593 * to lock_ptr from getting ahead of the wakeup. Clearing the lock 557 * lock_ptr from getting ahead of the wakeup. Clearing the lock at the
594 * at the end of wake_up_all() does not prevent this store from 558 * end of wake_up() does not prevent this store from moving.
595 * moving.
596 */ 559 */
597 smp_wmb(); 560 smp_wmb();
598 q->lock_ptr = NULL; 561 q->lock_ptr = NULL;
@@ -692,9 +655,16 @@ double_lock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
692 } 655 }
693} 656}
694 657
658static inline void
659double_unlock_hb(struct futex_hash_bucket *hb1, struct futex_hash_bucket *hb2)
660{
661 spin_unlock(&hb1->lock);
662 if (hb1 != hb2)
663 spin_unlock(&hb2->lock);
664}
665
695/* 666/*
696 * Wake up all waiters hashed on the physical page that is mapped 667 * Wake up waiters matching bitset queued on this futex (uaddr).
697 * to this virtual address:
698 */ 668 */
699static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset) 669static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
700{ 670{
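(Aside: double_unlock_hb() added above is the counterpart of double_lock_hb(), which takes
the two bucket locks in address order so concurrent callers can never deadlock on the same
pair. The same idiom in portable pthread form, purely for illustration:)

	#include <pthread.h>

	/* Always lock the lower address first; unlock order is irrelevant. */
	static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		if (a == b) {
			pthread_mutex_lock(a);
			return;
		}
		if (a > b) {
			pthread_mutex_t *t = a; a = b; b = t;
		}
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	}

	static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
	{
		pthread_mutex_unlock(a);
		if (a != b)
			pthread_mutex_unlock(b);
	}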
@@ -750,9 +720,9 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
750 struct futex_hash_bucket *hb1, *hb2; 720 struct futex_hash_bucket *hb1, *hb2;
751 struct plist_head *head; 721 struct plist_head *head;
752 struct futex_q *this, *next; 722 struct futex_q *this, *next;
753 int ret, op_ret, attempt = 0; 723 int ret, op_ret;
754 724
755retryfull: 725retry:
756 ret = get_futex_key(uaddr1, fshared, &key1); 726 ret = get_futex_key(uaddr1, fshared, &key1);
757 if (unlikely(ret != 0)) 727 if (unlikely(ret != 0))
758 goto out; 728 goto out;
@@ -763,16 +733,13 @@ retryfull:
763 hb1 = hash_futex(&key1); 733 hb1 = hash_futex(&key1);
764 hb2 = hash_futex(&key2); 734 hb2 = hash_futex(&key2);
765 735
766retry:
767 double_lock_hb(hb1, hb2); 736 double_lock_hb(hb1, hb2);
768 737retry_private:
769 op_ret = futex_atomic_op_inuser(op, uaddr2); 738 op_ret = futex_atomic_op_inuser(op, uaddr2);
770 if (unlikely(op_ret < 0)) { 739 if (unlikely(op_ret < 0)) {
771 u32 dummy; 740 u32 dummy;
772 741
773 spin_unlock(&hb1->lock); 742 double_unlock_hb(hb1, hb2);
774 if (hb1 != hb2)
775 spin_unlock(&hb2->lock);
776 743
777#ifndef CONFIG_MMU 744#ifndef CONFIG_MMU
778 /* 745 /*
@@ -788,26 +755,16 @@ retry:
788 goto out_put_keys; 755 goto out_put_keys;
789 } 756 }
790 757
791 /*
792 * futex_atomic_op_inuser needs to both read and write
793 * *(int __user *)uaddr2, but we can't modify it
794 * non-atomically. Therefore, if get_user below is not
795 * enough, we need to handle the fault ourselves, while
796 * still holding the mmap_sem.
797 */
798 if (attempt++) {
799 ret = futex_handle_fault((unsigned long)uaddr2,
800 attempt);
801 if (ret)
802 goto out_put_keys;
803 goto retry;
804 }
805
806 ret = get_user(dummy, uaddr2); 758 ret = get_user(dummy, uaddr2);
807 if (ret) 759 if (ret)
808 return ret; 760 goto out_put_keys;
761
762 if (!fshared)
763 goto retry_private;
809 764
810 goto retryfull; 765 put_futex_key(fshared, &key2);
766 put_futex_key(fshared, &key1);
767 goto retry;
811 } 768 }
812 769
813 head = &hb1->chain; 770 head = &hb1->chain;
@@ -834,9 +791,7 @@ retry:
834 ret += op_ret; 791 ret += op_ret;
835 } 792 }
836 793
837 spin_unlock(&hb1->lock); 794 double_unlock_hb(hb1, hb2);
838 if (hb1 != hb2)
839 spin_unlock(&hb2->lock);
840out_put_keys: 795out_put_keys:
841 put_futex_key(fshared, &key2); 796 put_futex_key(fshared, &key2);
842out_put_key1: 797out_put_key1:
@@ -869,6 +824,7 @@ retry:
869 hb1 = hash_futex(&key1); 824 hb1 = hash_futex(&key1);
870 hb2 = hash_futex(&key2); 825 hb2 = hash_futex(&key2);
871 826
827retry_private:
872 double_lock_hb(hb1, hb2); 828 double_lock_hb(hb1, hb2);
873 829
874 if (likely(cmpval != NULL)) { 830 if (likely(cmpval != NULL)) {
@@ -877,16 +833,18 @@ retry:
877 ret = get_futex_value_locked(&curval, uaddr1); 833 ret = get_futex_value_locked(&curval, uaddr1);
878 834
879 if (unlikely(ret)) { 835 if (unlikely(ret)) {
880 spin_unlock(&hb1->lock); 836 double_unlock_hb(hb1, hb2);
881 if (hb1 != hb2)
882 spin_unlock(&hb2->lock);
883 837
884 ret = get_user(curval, uaddr1); 838 ret = get_user(curval, uaddr1);
839 if (ret)
840 goto out_put_keys;
885 841
886 if (!ret) 842 if (!fshared)
887 goto retry; 843 goto retry_private;
888 844
889 goto out_put_keys; 845 put_futex_key(fshared, &key2);
846 put_futex_key(fshared, &key1);
847 goto retry;
890 } 848 }
891 if (curval != *cmpval) { 849 if (curval != *cmpval) {
892 ret = -EAGAIN; 850 ret = -EAGAIN;
@@ -923,9 +881,7 @@ retry:
923 } 881 }
924 882
925out_unlock: 883out_unlock:
926 spin_unlock(&hb1->lock); 884 double_unlock_hb(hb1, hb2);
927 if (hb1 != hb2)
928 spin_unlock(&hb2->lock);
929 885
930 /* drop_futex_key_refs() must be called outside the spinlocks. */ 886 /* drop_futex_key_refs() must be called outside the spinlocks. */
931 while (--drop_count >= 0) 887 while (--drop_count >= 0)
@@ -1063,7 +1019,7 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1063 struct futex_pi_state *pi_state = q->pi_state; 1019 struct futex_pi_state *pi_state = q->pi_state;
1064 struct task_struct *oldowner = pi_state->owner; 1020 struct task_struct *oldowner = pi_state->owner;
1065 u32 uval, curval, newval; 1021 u32 uval, curval, newval;
1066 int ret, attempt = 0; 1022 int ret;
1067 1023
1068 /* Owner died? */ 1024 /* Owner died? */
1069 if (!pi_state->owner) 1025 if (!pi_state->owner)
@@ -1076,11 +1032,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
1076 * in the user space variable. This must be atomic as we have 1032 * in the user space variable. This must be atomic as we have
1077 * to preserve the owner died bit here. 1033 * to preserve the owner died bit here.
1078 * 1034 *
1079 * Note: We write the user space value _before_ changing the 1035 * Note: We write the user space value _before_ changing the pi_state
1080 * pi_state because we can fault here. Imagine swapped out 1036 * because we can fault here. Imagine swapped out pages or a fork
1081 * pages or a fork, which was running right before we acquired 1037 * that marked all the anonymous memory readonly for cow.
1082 * mmap_sem, that marked all the anonymous memory readonly for
1083 * cow.
1084 * 1038 *
1085 * Modifying pi_state _before_ the user space value would 1039 * Modifying pi_state _before_ the user space value would
1086 * leave the pi_state in an inconsistent state when we fault 1040 * leave the pi_state in an inconsistent state when we fault
@@ -1136,7 +1090,7 @@ retry:
1136handle_fault: 1090handle_fault:
1137 spin_unlock(q->lock_ptr); 1091 spin_unlock(q->lock_ptr);
1138 1092
1139 ret = futex_handle_fault((unsigned long)uaddr, attempt++); 1093 ret = get_user(uval, uaddr);
1140 1094
1141 spin_lock(q->lock_ptr); 1095 spin_lock(q->lock_ptr);
1142 1096
@@ -1185,10 +1139,11 @@ retry:
1185 if (unlikely(ret != 0)) 1139 if (unlikely(ret != 0))
1186 goto out; 1140 goto out;
1187 1141
1142retry_private:
1188 hb = queue_lock(&q); 1143 hb = queue_lock(&q);
1189 1144
1190 /* 1145 /*
1191 * Access the page AFTER the futex is queued. 1146 * Access the page AFTER the hash-bucket is locked.
1192 * Order is important: 1147 * Order is important:
1193 * 1148 *
1194 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val); 1149 * Userspace waiter: val = var; if (cond(val)) futex_wait(&var, val);
@@ -1204,20 +1159,23 @@ retry:
1204 * a wakeup when *uaddr != val on entry to the syscall. This is 1159 * a wakeup when *uaddr != val on entry to the syscall. This is
1205 * rare, but normal. 1160 * rare, but normal.
1206 * 1161 *
1207 * for shared futexes, we hold the mmap semaphore, so the mapping 1162 * For shared futexes, we hold the mmap semaphore, so the mapping
1208 * cannot have changed since we looked it up in get_futex_key. 1163 * cannot have changed since we looked it up in get_futex_key.
1209 */ 1164 */
1210 ret = get_futex_value_locked(&uval, uaddr); 1165 ret = get_futex_value_locked(&uval, uaddr);
1211 1166
1212 if (unlikely(ret)) { 1167 if (unlikely(ret)) {
1213 queue_unlock(&q, hb); 1168 queue_unlock(&q, hb);
1214 put_futex_key(fshared, &q.key);
1215 1169
1216 ret = get_user(uval, uaddr); 1170 ret = get_user(uval, uaddr);
1171 if (ret)
1172 goto out_put_key;
1217 1173
1218 if (!ret) 1174 if (!fshared)
1219 goto retry; 1175 goto retry_private;
1220 goto out; 1176
1177 put_futex_key(fshared, &q.key);
1178 goto retry;
1221 } 1179 }
1222 ret = -EWOULDBLOCK; 1180 ret = -EWOULDBLOCK;
1223 if (unlikely(uval != val)) { 1181 if (unlikely(uval != val)) {
@@ -1248,16 +1206,13 @@ retry:
1248 if (!abs_time) 1206 if (!abs_time)
1249 schedule(); 1207 schedule();
1250 else { 1208 else {
1251 unsigned long slack;
1252 slack = current->timer_slack_ns;
1253 if (rt_task(current))
1254 slack = 0;
1255 hrtimer_init_on_stack(&t.timer, 1209 hrtimer_init_on_stack(&t.timer,
1256 clockrt ? CLOCK_REALTIME : 1210 clockrt ? CLOCK_REALTIME :
1257 CLOCK_MONOTONIC, 1211 CLOCK_MONOTONIC,
1258 HRTIMER_MODE_ABS); 1212 HRTIMER_MODE_ABS);
1259 hrtimer_init_sleeper(&t, current); 1213 hrtimer_init_sleeper(&t, current);
1260 hrtimer_set_expires_range_ns(&t.timer, *abs_time, slack); 1214 hrtimer_set_expires_range_ns(&t.timer, *abs_time,
1215 current->timer_slack_ns);
1261 1216
1262 hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS); 1217 hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
1263 if (!hrtimer_active(&t.timer)) 1218 if (!hrtimer_active(&t.timer))
@@ -1354,7 +1309,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
1354 struct futex_hash_bucket *hb; 1309 struct futex_hash_bucket *hb;
1355 u32 uval, newval, curval; 1310 u32 uval, newval, curval;
1356 struct futex_q q; 1311 struct futex_q q;
1357 int ret, lock_taken, ownerdied = 0, attempt = 0; 1312 int ret, lock_taken, ownerdied = 0;
1358 1313
1359 if (refill_pi_state_cache()) 1314 if (refill_pi_state_cache())
1360 return -ENOMEM; 1315 return -ENOMEM;
@@ -1374,7 +1329,7 @@ retry:
1374 if (unlikely(ret != 0)) 1329 if (unlikely(ret != 0))
1375 goto out; 1330 goto out;
1376 1331
1377retry_unlocked: 1332retry_private:
1378 hb = queue_lock(&q); 1333 hb = queue_lock(&q);
1379 1334
1380retry_locked: 1335retry_locked:
@@ -1458,6 +1413,7 @@ retry_locked:
1458 * exit to complete. 1413 * exit to complete.
1459 */ 1414 */
1460 queue_unlock(&q, hb); 1415 queue_unlock(&q, hb);
1416 put_futex_key(fshared, &q.key);
1461 cond_resched(); 1417 cond_resched();
1462 goto retry; 1418 goto retry;
1463 1419
@@ -1564,6 +1520,13 @@ retry_locked:
1564 } 1520 }
1565 } 1521 }
1566 1522
1523 /*
1524 * If fixup_pi_state_owner() faulted and was unable to handle the
1525 * fault, unlock it and return the fault to userspace.
1526 */
1527 if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
1528 rt_mutex_unlock(&q.pi_state->pi_mutex);
1529
1567 /* Unqueue and drop the lock */ 1530 /* Unqueue and drop the lock */
1568 unqueue_me_pi(&q); 1531 unqueue_me_pi(&q);
1569 1532
@@ -1591,22 +1554,18 @@ uaddr_faulted:
1591 */ 1554 */
1592 queue_unlock(&q, hb); 1555 queue_unlock(&q, hb);
1593 1556
1594 if (attempt++) {
1595 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1596 if (ret)
1597 goto out_put_key;
1598 goto retry_unlocked;
1599 }
1600
1601 ret = get_user(uval, uaddr); 1557 ret = get_user(uval, uaddr);
1602 if (!ret) 1558 if (ret)
1603 goto retry; 1559 goto out_put_key;
1604 1560
1605 if (to) 1561 if (!fshared)
1606 destroy_hrtimer_on_stack(&to->timer); 1562 goto retry_private;
1607 return ret; 1563
1564 put_futex_key(fshared, &q.key);
1565 goto retry;
1608} 1566}
1609 1567
1568
1610/* 1569/*
1611 * Userspace attempted a TID -> 0 atomic transition, and failed. 1570 * Userspace attempted a TID -> 0 atomic transition, and failed.
1612 * This is the in-kernel slowpath: we look up the PI state (if any), 1571 * This is the in-kernel slowpath: we look up the PI state (if any),
@@ -1619,7 +1578,7 @@ static int futex_unlock_pi(u32 __user *uaddr, int fshared)
1619 u32 uval; 1578 u32 uval;
1620 struct plist_head *head; 1579 struct plist_head *head;
1621 union futex_key key = FUTEX_KEY_INIT; 1580 union futex_key key = FUTEX_KEY_INIT;
1622 int ret, attempt = 0; 1581 int ret;
1623 1582
1624retry: 1583retry:
1625 if (get_user(uval, uaddr)) 1584 if (get_user(uval, uaddr))
@@ -1635,7 +1594,6 @@ retry:
1635 goto out; 1594 goto out;
1636 1595
1637 hb = hash_futex(&key); 1596 hb = hash_futex(&key);
1638retry_unlocked:
1639 spin_lock(&hb->lock); 1597 spin_lock(&hb->lock);
1640 1598
1641 /* 1599 /*
@@ -1700,14 +1658,7 @@ pi_faulted:
1700 * we have to drop the mmap_sem in order to call get_user(). 1658 * we have to drop the mmap_sem in order to call get_user().
1701 */ 1659 */
1702 spin_unlock(&hb->lock); 1660 spin_unlock(&hb->lock);
1703 1661 put_futex_key(fshared, &key);
1704 if (attempt++) {
1705 ret = futex_handle_fault((unsigned long)uaddr, attempt);
1706 if (ret)
1707 goto out;
1708 uval = 0;
1709 goto retry_unlocked;
1710 }
1711 1662
1712 ret = get_user(uval, uaddr); 1663 ret = get_user(uval, uaddr);
1713 if (!ret) 1664 if (!ret)
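(Aside: taken together, the retry/retry_private split above lets the common fault path
drop the keys, touch the page with get_user(), and retry, instead of the old
futex_handle_fault() dance. For reference, a complete user-space wait/wake pair over one
futex word; build with -pthread, sys_futex is a local wrapper, not a libc call:)

	#define _GNU_SOURCE
	#include <linux/futex.h>
	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static uint32_t flag;	/* the futex word */

	static long sys_futex(uint32_t *uaddr, int op, uint32_t val)
	{
		return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
	}

	static void *waiter(void *arg)
	{
		/* Sleeps only while flag is still 0; the in-kernel recheck of
		 * the user value (the EWOULDBLOCK path) makes this race-free. */
		while (__atomic_load_n(&flag, __ATOMIC_ACQUIRE) == 0)
			sys_futex(&flag, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0);
		puts("woken");
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, waiter, NULL);
		sleep(1);
		__atomic_store_n(&flag, 1, __ATOMIC_RELEASE);
		sys_futex(&flag, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1);
		pthread_join(t, NULL);
		return 0;
	}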
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 4dd5b1edac98..3394f8f52964 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
4obj-$(CONFIG_PROC_FS) += proc.o 4obj-$(CONFIG_PROC_FS) += proc.o
5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o 5obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
6obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o 6obj-$(CONFIG_NUMA_MIGRATE_IRQ_DESC) += numa_migrate.o
7obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 7de11bd64dfe..c687ba4363f2 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -46,7 +46,10 @@ void dynamic_irq_init(unsigned int irq)
46 desc->irq_count = 0; 46 desc->irq_count = 0;
47 desc->irqs_unhandled = 0; 47 desc->irqs_unhandled = 0;
48#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
49 cpumask_setall(&desc->affinity); 49 cpumask_setall(desc->affinity);
50#ifdef CONFIG_GENERIC_PENDING_IRQ
51 cpumask_clear(desc->pending_mask);
52#endif
50#endif 53#endif
51 spin_unlock_irqrestore(&desc->lock, flags); 54 spin_unlock_irqrestore(&desc->lock, flags);
52} 55}
@@ -78,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
78 desc->handle_irq = handle_bad_irq; 81 desc->handle_irq = handle_bad_irq;
79 desc->chip = &no_irq_chip; 82 desc->chip = &no_irq_chip;
80 desc->name = NULL; 83 desc->name = NULL;
84 clear_kstat_irqs(desc);
81 spin_unlock_irqrestore(&desc->lock, flags); 85 spin_unlock_irqrestore(&desc->lock, flags);
82} 86}
83 87
@@ -290,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
290 desc->chip->mask_ack(irq); 294 desc->chip->mask_ack(irq);
291 else { 295 else {
292 desc->chip->mask(irq); 296 desc->chip->mask(irq);
293 desc->chip->ack(irq); 297 if (desc->chip->ack)
298 desc->chip->ack(irq);
294 } 299 }
295} 300}
296 301
@@ -476,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
476 kstat_incr_irqs_this_cpu(irq, desc); 481 kstat_incr_irqs_this_cpu(irq, desc);
477 482
478 /* Start handling the irq */ 483 /* Start handling the irq */
479 desc->chip->ack(irq); 484 if (desc->chip->ack)
485 desc->chip->ack(irq);
480 desc = irq_remap_to_desc(irq, desc); 486 desc = irq_remap_to_desc(irq, desc);
481 487
482 /* Mark the IRQ currently in progress.*/ 488 /* Mark the IRQ currently in progress.*/
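(Aside: the two NULL checks above let a controller register an irq_chip with no .ack
callback at all, where handle_edge_irq() would previously have dereferenced NULL. A hedged
sketch of such a chip -- names are hypothetical, struct members are the 2.6.29-era ones:)

	/* Hypothetical controller with implicit acknowledge: no .ack needed. */
	static void demo_mask(unsigned int irq)   { /* set mask register */ }
	static void demo_unmask(unsigned int irq) { /* clear mask register */ }

	static struct irq_chip demo_chip = {
		.name	= "demo",
		.mask	= demo_mask,
		.unmask	= demo_unmask,
		/* .ack left NULL: mask_ack_irq()/handle_edge_irq() now allow it */
	};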
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3aba8d12f328..9ebf77968871 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
17#include <linux/kernel_stat.h> 17#include <linux/kernel_stat.h>
18#include <linux/rculist.h> 18#include <linux/rculist.h>
19#include <linux/hash.h> 19#include <linux/hash.h>
20#include <linux/bootmem.h>
20 21
21#include "internals.h" 22#include "internals.h"
22 23
@@ -69,6 +70,7 @@ int nr_irqs = NR_IRQS;
69EXPORT_SYMBOL_GPL(nr_irqs); 70EXPORT_SYMBOL_GPL(nr_irqs);
70 71
71#ifdef CONFIG_SPARSE_IRQ 72#ifdef CONFIG_SPARSE_IRQ
73
72static struct irq_desc irq_desc_init = { 74static struct irq_desc irq_desc_init = {
73 .irq = -1, 75 .irq = -1,
74 .status = IRQ_DISABLED, 76 .status = IRQ_DISABLED,
@@ -76,26 +78,25 @@ static struct irq_desc irq_desc_init = {
76 .handle_irq = handle_bad_irq, 78 .handle_irq = handle_bad_irq,
77 .depth = 1, 79 .depth = 1,
78 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 80 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
79#ifdef CONFIG_SMP
80 .affinity = CPU_MASK_ALL
81#endif
82}; 81};
83 82
84void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) 83void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
85{ 84{
86 unsigned long bytes;
87 char *ptr;
88 int node; 85 int node;
89 86 void *ptr;
90 /* Compute how many bytes we need per irq and allocate them */
91 bytes = nr * sizeof(unsigned int);
92 87
93 node = cpu_to_node(cpu); 88 node = cpu_to_node(cpu);
94 ptr = kzalloc_node(bytes, GFP_ATOMIC, node); 89 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
95 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", cpu, node);
96 90
97 if (ptr) 91 /*
 98 desc->kstat_irqs = (unsigned int *)ptr; 92 * don't overwrite if we cannot get a new one;
 93 * init_copy_kstat_irqs() could still be using the old one
94 */
95 if (ptr) {
96 printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
97 cpu, node);
98 desc->kstat_irqs = ptr;
99 }
99} 100}
100 101
101static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) 102static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -113,6 +114,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
113 printk(KERN_ERR "can not alloc kstat_irqs\n"); 114 printk(KERN_ERR "can not alloc kstat_irqs\n");
114 BUG_ON(1); 115 BUG_ON(1);
115 } 116 }
117 if (!init_alloc_desc_masks(desc, cpu, false)) {
118 printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
119 BUG_ON(1);
120 }
116 arch_init_chip_data(desc, cpu); 121 arch_init_chip_data(desc, cpu);
117} 122}
118 123
@@ -121,7 +126,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
121 */ 126 */
122DEFINE_SPINLOCK(sparse_irq_lock); 127DEFINE_SPINLOCK(sparse_irq_lock);
123 128
124struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly; 129struct irq_desc **irq_desc_ptrs __read_mostly;
125 130
126static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { 131static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
127 [0 ... NR_IRQS_LEGACY-1] = { 132 [0 ... NR_IRQS_LEGACY-1] = {
@@ -131,14 +136,10 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
131 .handle_irq = handle_bad_irq, 136 .handle_irq = handle_bad_irq,
132 .depth = 1, 137 .depth = 1,
133 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), 138 .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
134#ifdef CONFIG_SMP
135 .affinity = CPU_MASK_ALL
136#endif
137 } 139 }
138}; 140};
139 141
140/* FIXME: use bootmem alloc ...*/ 142static unsigned int *kstat_irqs_legacy;
141static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
142 143
143int __init early_irq_init(void) 144int __init early_irq_init(void)
144{ 145{
@@ -148,18 +149,30 @@ int __init early_irq_init(void)
148 149
149 init_irq_default_affinity(); 150 init_irq_default_affinity();
150 151
152 /* initialize nr_irqs based on nr_cpu_ids */
153 arch_probe_nr_irqs();
154 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
155
151 desc = irq_desc_legacy; 156 desc = irq_desc_legacy;
152 legacy_count = ARRAY_SIZE(irq_desc_legacy); 157 legacy_count = ARRAY_SIZE(irq_desc_legacy);
153 158
159 /* allocate irq_desc_ptrs array based on nr_irqs */
160 irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
161
162 /* allocate based on nr_cpu_ids */
 163 /* FIXME: invert kstat_irqs, and it'd be a per_cpu_alloc'd thing */
164 kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
165 sizeof(int));
166
154 for (i = 0; i < legacy_count; i++) { 167 for (i = 0; i < legacy_count; i++) {
155 desc[i].irq = i; 168 desc[i].irq = i;
156 desc[i].kstat_irqs = kstat_irqs_legacy[i]; 169 desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
157 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 170 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
158 171 init_alloc_desc_masks(&desc[i], 0, true);
159 irq_desc_ptrs[i] = desc + i; 172 irq_desc_ptrs[i] = desc + i;
160 } 173 }
161 174
162 for (i = legacy_count; i < NR_IRQS; i++) 175 for (i = legacy_count; i < nr_irqs; i++)
163 irq_desc_ptrs[i] = NULL; 176 irq_desc_ptrs[i] = NULL;
164 177
165 return arch_early_irq_init(); 178 return arch_early_irq_init();
@@ -167,7 +180,10 @@ int __init early_irq_init(void)
167 180
168struct irq_desc *irq_to_desc(unsigned int irq) 181struct irq_desc *irq_to_desc(unsigned int irq)
169{ 182{
170 return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL; 183 if (irq_desc_ptrs && irq < nr_irqs)
184 return irq_desc_ptrs[irq];
185
186 return NULL;
171} 187}
172 188
173struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) 189struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -176,10 +192,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
176 unsigned long flags; 192 unsigned long flags;
177 int node; 193 int node;
178 194
179 if (irq >= NR_IRQS) { 195 if (irq >= nr_irqs) {
180 printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n", 196 WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
181 irq, NR_IRQS); 197 irq, nr_irqs);
182 WARN_ON(1);
183 return NULL; 198 return NULL;
184 } 199 }
185 200
@@ -221,12 +236,10 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
221 .handle_irq = handle_bad_irq, 236 .handle_irq = handle_bad_irq,
222 .depth = 1, 237 .depth = 1,
223 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), 238 .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
224#ifdef CONFIG_SMP
225 .affinity = CPU_MASK_ALL
226#endif
227 } 239 }
228}; 240};
229 241
242static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
230int __init early_irq_init(void) 243int __init early_irq_init(void)
231{ 244{
232 struct irq_desc *desc; 245 struct irq_desc *desc;
@@ -235,12 +248,16 @@ int __init early_irq_init(void)
235 248
236 init_irq_default_affinity(); 249 init_irq_default_affinity();
237 250
251 printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
252
238 desc = irq_desc; 253 desc = irq_desc;
239 count = ARRAY_SIZE(irq_desc); 254 count = ARRAY_SIZE(irq_desc);
240 255
241 for (i = 0; i < count; i++) 256 for (i = 0; i < count; i++) {
242 desc[i].irq = i; 257 desc[i].irq = i;
243 258 init_alloc_desc_masks(&desc[i], 0, true);
259 desc[i].kstat_irqs = kstat_irqs_all[i];
260 }
244 return arch_early_irq_init(); 261 return arch_early_irq_init();
245} 262}
246 263
@@ -255,6 +272,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
255} 272}
256#endif /* !CONFIG_SPARSE_IRQ */ 273#endif /* !CONFIG_SPARSE_IRQ */
257 274
275void clear_kstat_irqs(struct irq_desc *desc)
276{
277 memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
278}
279
258/* 280/*
259 * What should we do if we get a hw irq event on an illegal vector? 281 * What should we do if we get a hw irq event on an illegal vector?
260 * Each architecture has to answer this itself. 282 * Each architecture has to answer this itself.
@@ -328,6 +350,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
328 irqreturn_t ret, retval = IRQ_NONE; 350 irqreturn_t ret, retval = IRQ_NONE;
329 unsigned int status = 0; 351 unsigned int status = 0;
330 352
353 WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
354
331 if (!(action->flags & IRQF_DISABLED)) 355 if (!(action->flags & IRQF_DISABLED))
332 local_irq_enable_in_hardirq(); 356 local_irq_enable_in_hardirq();
333 357
@@ -347,6 +371,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
347} 371}
348 372
349#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ 373#ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
374
375#ifdef CONFIG_ENABLE_WARN_DEPRECATED
376# warning __do_IRQ is deprecated. Please convert to proper flow handlers
377#endif
378
350/** 379/**
351 * __do_IRQ - original all in one highlevel IRQ handler 380 * __do_IRQ - original all in one highlevel IRQ handler
352 * @irq: the interrupt number 381 * @irq: the interrupt number
@@ -467,12 +496,10 @@ void early_init_irq_lock_class(void)
467 } 496 }
468} 497}
469 498
470#ifdef CONFIG_SPARSE_IRQ
471unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) 499unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
472{ 500{
473 struct irq_desc *desc = irq_to_desc(irq); 501 struct irq_desc *desc = irq_to_desc(irq);
474 return desc ? desc->kstat_irqs[cpu] : 0; 502 return desc ? desc->kstat_irqs[cpu] : 0;
475} 503}
476#endif
477EXPORT_SYMBOL(kstat_irqs_cpu); 504EXPORT_SYMBOL(kstat_irqs_cpu);
478 505
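(Aside: kstat_irqs_cpu(), now built for both the sparse and non-sparse configurations,
feeds the per-CPU columns of /proc/interrupts; dumping that file is the user-space way to
read the same desc->kstat_irqs counters:)

	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/interrupts", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);	/* one row per irq_desc */
		fclose(f);
		return 0;
	}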
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e6d0a43cc125..01ce20eab38f 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -12,11 +12,21 @@ extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
12 12
13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, 13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
14 unsigned long flags); 14 unsigned long flags);
15extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
16extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
15 17
16extern struct lock_class_key irq_desc_lock_class; 18extern struct lock_class_key irq_desc_lock_class;
17extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr); 19extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
20extern void clear_kstat_irqs(struct irq_desc *desc);
18extern spinlock_t sparse_irq_lock; 21extern spinlock_t sparse_irq_lock;
22
23#ifdef CONFIG_SPARSE_IRQ
24/* irq_desc_ptrs allocated at boot time */
25extern struct irq_desc **irq_desc_ptrs;
26#else
27/* irq_desc_ptrs is a fixed size array */
19extern struct irq_desc *irq_desc_ptrs[NR_IRQS]; 28extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
29#endif
20 30
21#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
22extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); 32extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
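(Aside: the new suspend/resume parameters on __disable_irq()/__enable_irq() exist for the
kernel/irq/pm.c file added by this series. Its entry points are meant to bracket the
platform suspend sequence, roughly as below; error handling and the surrounding suspend
code are elided:)

	suspend_device_irqs();	/* __disable_irq(desc, irq, true) on each desc */
	/* ... devices quiesced, enter the low-power state ... */
	resume_device_irqs();	/* __enable_irq(desc, irq, true) on each desc */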
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 291f03664552..1516ab77355c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -90,14 +90,14 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
90 90
91#ifdef CONFIG_GENERIC_PENDING_IRQ 91#ifdef CONFIG_GENERIC_PENDING_IRQ
92 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) { 92 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
93 cpumask_copy(&desc->affinity, cpumask); 93 cpumask_copy(desc->affinity, cpumask);
94 desc->chip->set_affinity(irq, cpumask); 94 desc->chip->set_affinity(irq, cpumask);
95 } else { 95 } else {
96 desc->status |= IRQ_MOVE_PENDING; 96 desc->status |= IRQ_MOVE_PENDING;
97 cpumask_copy(&desc->pending_mask, cpumask); 97 cpumask_copy(desc->pending_mask, cpumask);
98 } 98 }
99#else 99#else
100 cpumask_copy(&desc->affinity, cpumask); 100 cpumask_copy(desc->affinity, cpumask);
101 desc->chip->set_affinity(irq, cpumask); 101 desc->chip->set_affinity(irq, cpumask);
102#endif 102#endif
103 desc->status |= IRQ_AFFINITY_SET; 103 desc->status |= IRQ_AFFINITY_SET;
@@ -109,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
109/* 109/*
110 * Generic version of the affinity autoselector. 110 * Generic version of the affinity autoselector.
111 */ 111 */
112int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc) 112static int setup_affinity(unsigned int irq, struct irq_desc *desc)
113{ 113{
114 if (!irq_can_set_affinity(irq)) 114 if (!irq_can_set_affinity(irq))
115 return 0; 115 return 0;
@@ -119,21 +119,21 @@ int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
119 * one of the targets is online. 119 * one of the targets is online.
120 */ 120 */
121 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { 121 if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
122 if (cpumask_any_and(&desc->affinity, cpu_online_mask) 122 if (cpumask_any_and(desc->affinity, cpu_online_mask)
123 < nr_cpu_ids) 123 < nr_cpu_ids)
124 goto set_affinity; 124 goto set_affinity;
125 else 125 else
126 desc->status &= ~IRQ_AFFINITY_SET; 126 desc->status &= ~IRQ_AFFINITY_SET;
127 } 127 }
128 128
129 cpumask_and(&desc->affinity, cpu_online_mask, irq_default_affinity); 129 cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
130set_affinity: 130set_affinity:
131 desc->chip->set_affinity(irq, &desc->affinity); 131 desc->chip->set_affinity(irq, desc->affinity);
132 132
133 return 0; 133 return 0;
134} 134}
135#else 135#else
136static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d) 136static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
137{ 137{
138 return irq_select_affinity(irq); 138 return irq_select_affinity(irq);
139} 139}
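(Aside: setup_affinity() is the in-kernel autoselector; the user-controlled path,
irq_select_affinity_usr() just below, is driven by writes to /proc/irq/<n>/smp_affinity.
For example -- the IRQ number and mask here are arbitrary:)

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/irq/19/smp_affinity", "w");

		if (!f) {
			perror("fopen");	/* needs root */
			return 1;
		}
		fprintf(f, "%x\n", 0x3);	/* allow CPUs 0 and 1 */
		fclose(f);
		return 0;
	}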
@@ -149,19 +149,33 @@ int irq_select_affinity_usr(unsigned int irq)
149 int ret; 149 int ret;
150 150
151 spin_lock_irqsave(&desc->lock, flags); 151 spin_lock_irqsave(&desc->lock, flags);
152 ret = do_irq_select_affinity(irq, desc); 152 ret = setup_affinity(irq, desc);
153 spin_unlock_irqrestore(&desc->lock, flags); 153 spin_unlock_irqrestore(&desc->lock, flags);
154 154
155 return ret; 155 return ret;
156} 156}
157 157
158#else 158#else
159static inline int do_irq_select_affinity(int irq, struct irq_desc *desc) 159static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
160{ 160{
161 return 0; 161 return 0;
162} 162}
163#endif 163#endif
164 164
165void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
166{
167 if (suspend) {
168 if (!desc->action || (desc->action->flags & IRQF_TIMER))
169 return;
170 desc->status |= IRQ_SUSPENDED;
171 }
172
173 if (!desc->depth++) {
174 desc->status |= IRQ_DISABLED;
175 desc->chip->disable(irq);
176 }
177}
178
165/** 179/**
166 * disable_irq_nosync - disable an irq without waiting 180 * disable_irq_nosync - disable an irq without waiting
167 * @irq: Interrupt to disable 181 * @irq: Interrupt to disable
@@ -182,10 +196,7 @@ void disable_irq_nosync(unsigned int irq)
182 return; 196 return;
183 197
184 spin_lock_irqsave(&desc->lock, flags); 198 spin_lock_irqsave(&desc->lock, flags);
185 if (!desc->depth++) { 199 __disable_irq(desc, irq, false);
186 desc->status |= IRQ_DISABLED;
187 desc->chip->disable(irq);
188 }
189 spin_unlock_irqrestore(&desc->lock, flags); 200 spin_unlock_irqrestore(&desc->lock, flags);
190} 201}
191EXPORT_SYMBOL(disable_irq_nosync); 202EXPORT_SYMBOL(disable_irq_nosync);
@@ -215,15 +226,21 @@ void disable_irq(unsigned int irq)
215} 226}
216EXPORT_SYMBOL(disable_irq); 227EXPORT_SYMBOL(disable_irq);
217 228
218static void __enable_irq(struct irq_desc *desc, unsigned int irq) 229void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
219{ 230{
231 if (resume)
232 desc->status &= ~IRQ_SUSPENDED;
233
220 switch (desc->depth) { 234 switch (desc->depth) {
221 case 0: 235 case 0:
236 err_out:
222 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); 237 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
223 break; 238 break;
224 case 1: { 239 case 1: {
225 unsigned int status = desc->status & ~IRQ_DISABLED; 240 unsigned int status = desc->status & ~IRQ_DISABLED;
226 241
242 if (desc->status & IRQ_SUSPENDED)
243 goto err_out;
227 /* Prevent probing on this irq: */ 244 /* Prevent probing on this irq: */
228 desc->status = status | IRQ_NOPROBE; 245 desc->status = status | IRQ_NOPROBE;
229 check_irq_resend(desc, irq); 246 check_irq_resend(desc, irq);
@@ -253,7 +270,7 @@ void enable_irq(unsigned int irq)
253 return; 270 return;
254 271
255 spin_lock_irqsave(&desc->lock, flags); 272 spin_lock_irqsave(&desc->lock, flags);
256 __enable_irq(desc, irq); 273 __enable_irq(desc, irq, false);
257 spin_unlock_irqrestore(&desc->lock, flags); 274 spin_unlock_irqrestore(&desc->lock, flags);
258} 275}
259EXPORT_SYMBOL(enable_irq); 276EXPORT_SYMBOL(enable_irq);
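(Aside: driver code pairs these calls as a sleeping critical section against the handler;
a typical use, where dev->irq and reprogram_hw() are placeholders for real driver state:)

	disable_irq(dev->irq);	/* returns only when no handler is running */
	reprogram_hw(dev);	/* safe: the handler cannot race with us */
	enable_irq(dev->irq);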
@@ -389,9 +406,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
389 * allocate special interrupts that are part of the architecture. 406 * allocate special interrupts that are part of the architecture.
390 */ 407 */
391static int 408static int
392__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new) 409__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
393{ 410{
394 struct irqaction *old, **p; 411 struct irqaction *old, **old_ptr;
395 const char *old_name = NULL; 412 const char *old_name = NULL;
396 unsigned long flags; 413 unsigned long flags;
397 int shared = 0; 414 int shared = 0;
@@ -423,8 +440,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
423 * The following block of code has to be executed atomically 440 * The following block of code has to be executed atomically
424 */ 441 */
425 spin_lock_irqsave(&desc->lock, flags); 442 spin_lock_irqsave(&desc->lock, flags);
426 p = &desc->action; 443 old_ptr = &desc->action;
427 old = *p; 444 old = *old_ptr;
428 if (old) { 445 if (old) {
429 /* 446 /*
430 * Can't share interrupts unless both agree to and are 447 * Can't share interrupts unless both agree to and are
@@ -447,8 +464,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
447 464
448 /* add new interrupt at end of irq queue */ 465 /* add new interrupt at end of irq queue */
449 do { 466 do {
450 p = &old->next; 467 old_ptr = &old->next;
451 old = *p; 468 old = *old_ptr;
452 } while (old); 469 } while (old);
453 shared = 1; 470 shared = 1;
454 } 471 }
@@ -488,7 +505,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
488 desc->status |= IRQ_NO_BALANCING; 505 desc->status |= IRQ_NO_BALANCING;
489 506
490 /* Set default affinity mask once everything is setup */ 507 /* Set default affinity mask once everything is setup */
491 do_irq_select_affinity(irq, desc); 508 setup_affinity(irq, desc);
492 509
493 } else if ((new->flags & IRQF_TRIGGER_MASK) 510 } else if ((new->flags & IRQF_TRIGGER_MASK)
494 && (new->flags & IRQF_TRIGGER_MASK) 511 && (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +516,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
499 (int)(new->flags & IRQF_TRIGGER_MASK)); 516 (int)(new->flags & IRQF_TRIGGER_MASK));
500 } 517 }
501 518
502 *p = new; 519 *old_ptr = new;
503 520
504 /* Reset broken irq detection when installing new handler */ 521 /* Reset broken irq detection when installing new handler */
505 desc->irq_count = 0; 522 desc->irq_count = 0;
@@ -511,7 +528,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
511 */ 528 */
512 if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) { 529 if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
513 desc->status &= ~IRQ_SPURIOUS_DISABLED; 530 desc->status &= ~IRQ_SPURIOUS_DISABLED;
514 __enable_irq(desc, irq); 531 __enable_irq(desc, irq, false);
515 } 532 }
516 533
517 spin_unlock_irqrestore(&desc->lock, flags); 534 spin_unlock_irqrestore(&desc->lock, flags);
@@ -549,90 +566,117 @@ int setup_irq(unsigned int irq, struct irqaction *act)
549 566
550 return __setup_irq(irq, desc, act); 567 return __setup_irq(irq, desc, act);
551} 568}
569EXPORT_SYMBOL_GPL(setup_irq);
552 570
553/** 571 /*
554 * free_irq - free an interrupt 572 * Internal function to unregister an irqaction - used to free
555 * @irq: Interrupt line to free 573 * regular and special interrupts that are part of the architecture.
556 * @dev_id: Device identity to free
557 *
558 * Remove an interrupt handler. The handler is removed and if the
559 * interrupt line is no longer in use by any driver it is disabled.
560 * On a shared IRQ the caller must ensure the interrupt is disabled
561 * on the card it drives before calling this function. The function
562 * does not return until any executing interrupts for this IRQ
563 * have completed.
564 *
565 * This function must not be called from interrupt context.
566 */ 574 */
567void free_irq(unsigned int irq, void *dev_id) 575static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
568{ 576{
569 struct irq_desc *desc = irq_to_desc(irq); 577 struct irq_desc *desc = irq_to_desc(irq);
570 struct irqaction **p; 578 struct irqaction *action, **action_ptr;
571 unsigned long flags; 579 unsigned long flags;
572 580
573 WARN_ON(in_interrupt()); 581 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
574 582
575 if (!desc) 583 if (!desc)
576 return; 584 return NULL;
577 585
578 spin_lock_irqsave(&desc->lock, flags); 586 spin_lock_irqsave(&desc->lock, flags);
579 p = &desc->action; 587
588 /*
589 * There can be multiple actions per IRQ descriptor, find the right
590 * one based on the dev_id:
591 */
592 action_ptr = &desc->action;
580 for (;;) { 593 for (;;) {
581 struct irqaction *action = *p; 594 action = *action_ptr;
582 595
583 if (action) { 596 if (!action) {
584 struct irqaction **pp = p; 597 WARN(1, "Trying to free already-free IRQ %d\n", irq);
598 spin_unlock_irqrestore(&desc->lock, flags);
585 599
586 p = &action->next; 600 return NULL;
587 if (action->dev_id != dev_id) 601 }
588 continue;
589 602
590 /* Found it - now remove it from the list of entries */ 603 if (action->dev_id == dev_id)
591 *pp = action->next; 604 break;
605 action_ptr = &action->next;
606 }
592 607
593 /* Currently used only by UML, might disappear one day.*/ 608 /* Found it - now remove it from the list of entries: */
609 *action_ptr = action->next;
610
611 /* Currently used only by UML, might disappear one day: */
594#ifdef CONFIG_IRQ_RELEASE_METHOD 612#ifdef CONFIG_IRQ_RELEASE_METHOD
595 if (desc->chip->release) 613 if (desc->chip->release)
596 desc->chip->release(irq, dev_id); 614 desc->chip->release(irq, dev_id);
597#endif 615#endif
598 616
599 if (!desc->action) { 617 /* If this was the last handler, shut down the IRQ line: */
600 desc->status |= IRQ_DISABLED; 618 if (!desc->action) {
601 if (desc->chip->shutdown) 619 desc->status |= IRQ_DISABLED;
602 desc->chip->shutdown(irq); 620 if (desc->chip->shutdown)
603 else 621 desc->chip->shutdown(irq);
604 desc->chip->disable(irq); 622 else
605 } 623 desc->chip->disable(irq);
606 spin_unlock_irqrestore(&desc->lock, flags); 624 }
607 unregister_handler_proc(irq, action); 625 spin_unlock_irqrestore(&desc->lock, flags);
626
627 unregister_handler_proc(irq, action);
628
629 /* Make sure it's not being used on another CPU: */
630 synchronize_irq(irq);
608 631
609 /* Make sure it's not being used on another CPU */
610 synchronize_irq(irq);
611#ifdef CONFIG_DEBUG_SHIRQ
612 /*
613 * It's a shared IRQ -- the driver ought to be
614 * prepared for it to happen even now it's
615 * being freed, so let's make sure.... We do
616 * this after actually deregistering it, to
617 * make sure that a 'real' IRQ doesn't run in
618 * parallel with our fake
619 */
620 if (action->flags & IRQF_SHARED) {
621 local_irq_save(flags);
622 action->handler(irq, dev_id);
623 local_irq_restore(flags);
624 }
625#endif
626 kfree(action);
627 return;
628 }
629 printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
630#ifdef CONFIG_DEBUG_SHIRQ 632#ifdef CONFIG_DEBUG_SHIRQ
631 dump_stack(); 633 /*
632#endif 634 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
633 spin_unlock_irqrestore(&desc->lock, flags); 635 * event to happen even now it's being freed, so let's make sure that
634 return; 636 * is so by doing an extra call to the handler ....
637 *
638 * ( We do this after actually deregistering it, to make sure that a
 639 * 'real' IRQ doesn't run in parallel with our fake. )
640 */
641 if (action->flags & IRQF_SHARED) {
642 local_irq_save(flags);
643 action->handler(irq, dev_id);
644 local_irq_restore(flags);
635 } 645 }
646#endif
647 return action;
648}
649
650/**
651 * remove_irq - free an interrupt
652 * @irq: Interrupt line to free
653 * @act: irqaction for the interrupt
654 *
655 * Used to remove interrupts statically setup by the early boot process.
656 */
657void remove_irq(unsigned int irq, struct irqaction *act)
658{
659 __free_irq(irq, act->dev_id);
660}
661EXPORT_SYMBOL_GPL(remove_irq);
662
663/**
664 * free_irq - free an interrupt allocated with request_irq
665 * @irq: Interrupt line to free
666 * @dev_id: Device identity to free
667 *
668 * Remove an interrupt handler. The handler is removed and if the
669 * interrupt line is no longer in use by any driver it is disabled.
670 * On a shared IRQ the caller must ensure the interrupt is disabled
671 * on the card it drives before calling this function. The function
672 * does not return until any executing interrupts for this IRQ
673 * have completed.
674 *
675 * This function must not be called from interrupt context.
676 */
677void free_irq(unsigned int irq, void *dev_id)
678{
679 kfree(__free_irq(irq, dev_id));
636} 680}
637EXPORT_SYMBOL(free_irq); 681EXPORT_SYMBOL(free_irq);
638 682
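
The rework above splits free_irq() into a locked __free_irq() that unlinks and returns the irqaction, plus thin wrappers: free_irq() kfree()s the returned action, while the new remove_irq() only detaches it, so irqactions statically set up at early boot are never handed to kfree(). Returning the node also lets synchronize_irq() and the DEBUG_SHIRQ extra handler call run after the descriptor lock has been dropped. A user-space sketch of the unlink-and-return shape (names are illustrative):

#include <stdio.h>

struct action {
	int dev_id;
	struct action *next;
};

static struct action *unlink_action(struct action **head, int dev_id)
{
	struct action **link = head, *a;

	while ((a = *link) != NULL) {
		if (a->dev_id == dev_id) {
			*link = a->next;	/* unlink; the node stays alive */
			return a;		/* ownership passes to the caller */
		}
		link = &a->next;
	}
	return NULL;				/* the "already-free" case */
}

int main(void)
{
	struct action b = { 2, NULL }, a = { 1, &b };
	struct action *head = &a;
	struct action *got = unlink_action(&head, 2);

	printf("removed %d, head now %d\n", got->dev_id, head->dev_id);
	printf("lookup miss -> %s\n", unlink_action(&head, 9) ? "found" : "NULL");
	return 0;
}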
@@ -679,11 +723,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
679 * the behavior is classified as "will not fix" so we need to 723 * the behavior is classified as "will not fix" so we need to
680 * start nudging drivers away from using that idiom. 724 * start nudging drivers away from using that idiom.
681 */ 725 */
682 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) 726 if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
683 == (IRQF_SHARED|IRQF_DISABLED)) 727 (IRQF_SHARED|IRQF_DISABLED)) {
684 pr_warning("IRQ %d/%s: IRQF_DISABLED is not " 728 pr_warning(
685 "guaranteed on shared IRQs\n", 729 "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
686 irq, devname); 730 irq, devname);
731 }
687 732
688#ifdef CONFIG_LOCKDEP 733#ifdef CONFIG_LOCKDEP
689 /* 734 /*
@@ -709,15 +754,13 @@ int request_irq(unsigned int irq, irq_handler_t handler,
709 if (!handler) 754 if (!handler)
710 return -EINVAL; 755 return -EINVAL;
711 756
712 action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC); 757 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
713 if (!action) 758 if (!action)
714 return -ENOMEM; 759 return -ENOMEM;
715 760
716 action->handler = handler; 761 action->handler = handler;
717 action->flags = irqflags; 762 action->flags = irqflags;
718 cpus_clear(action->mask);
719 action->name = devname; 763 action->name = devname;
720 action->next = NULL;
721 action->dev_id = dev_id; 764 action->dev_id = dev_id;
722 765
723 retval = __setup_irq(irq, desc, action); 766 retval = __setup_irq(irq, desc, action);
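
The allocation change in request_irq() does double duty: request_irq() may sleep anyway, so GFP_KERNEL replaces the needlessly strict GFP_ATOMIC, and kzalloc() hands back zeroed memory, which is what makes the deleted cpus_clear(action->mask) and action->next = NULL initializers redundant. The resulting shape, as in the hunk above:

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;	/* every field not set here is already 0/NULL */
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;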
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bd72329e630c..e05ad9be43b7 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -18,7 +18,7 @@ void move_masked_irq(int irq)
18 18
19 desc->status &= ~IRQ_MOVE_PENDING; 19 desc->status &= ~IRQ_MOVE_PENDING;
20 20
21 if (unlikely(cpumask_empty(&desc->pending_mask))) 21 if (unlikely(cpumask_empty(desc->pending_mask)))
22 return; 22 return;
23 23
24 if (!desc->chip->set_affinity) 24 if (!desc->chip->set_affinity)
@@ -38,13 +38,13 @@ void move_masked_irq(int irq)
38 * For correct operation this depends on the caller 38 * For correct operation this depends on the caller
39 * masking the irqs. 39 * masking the irqs.
40 */ 40 */
41 if (likely(cpumask_any_and(&desc->pending_mask, cpu_online_mask) 41 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask)
42 < nr_cpu_ids)) { 42 < nr_cpu_ids)) {
43 cpumask_and(&desc->affinity, 43 cpumask_and(desc->affinity,
44 &desc->pending_mask, cpu_online_mask); 44 desc->pending_mask, cpu_online_mask);
45 desc->chip->set_affinity(irq, &desc->affinity); 45 desc->chip->set_affinity(irq, desc->affinity);
46 } 46 }
47 cpumask_clear(&desc->pending_mask); 47 cpumask_clear(desc->pending_mask);
48} 48}
49 49
50void move_native_irq(int irq) 50void move_native_irq(int irq)
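
The dropped '&' in front of desc->affinity and desc->pending_mask reflects those irq_desc fields having become cpumask_var_t: with CONFIG_CPUMASK_OFFSTACK=y the variable is a pointer to a separately allocated mask, otherwise an on-stack array, and in both cases it is handed to the cpumask helpers directly. A hedged kernel-style sketch of the idiom (the function name is illustrative):

#include <linux/cpumask.h>
#include <linux/slab.h>

static int example_mask_use(void)
{
	cpumask_var_t mask;

	/* a no-op returning true when !CONFIG_CPUMASK_OFFSTACK */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(mask, cpu_online_mask, cpu_all_mask);	/* no '&' on mask */

	free_cpumask_var(mask);
	return 0;
}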
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index acd88356ac76..243d6121e50e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
17 struct irq_desc *desc, 17 struct irq_desc *desc,
18 int cpu, int nr) 18 int cpu, int nr)
19{ 19{
20 unsigned long bytes;
21
22 init_kstat_irqs(desc, cpu, nr); 20 init_kstat_irqs(desc, cpu, nr);
23 21
24 if (desc->kstat_irqs != old_desc->kstat_irqs) { 22 if (desc->kstat_irqs != old_desc->kstat_irqs)
25 /* Compute how many bytes we need per irq and allocate them */ 23 memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
26 bytes = nr * sizeof(unsigned int); 24 nr * sizeof(*desc->kstat_irqs));
27
28 memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
29 }
30} 25}
31 26
32static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) 27static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -38,15 +33,22 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
38 old_desc->kstat_irqs = NULL; 33 old_desc->kstat_irqs = NULL;
39} 34}
40 35
41static void init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, 36static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
42 struct irq_desc *desc, int cpu) 37 struct irq_desc *desc, int cpu)
43{ 38{
44 memcpy(desc, old_desc, sizeof(struct irq_desc)); 39 memcpy(desc, old_desc, sizeof(struct irq_desc));
40 if (!init_alloc_desc_masks(desc, cpu, false)) {
41 printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
42 "for migration.\n", irq);
43 return false;
44 }
45 spin_lock_init(&desc->lock); 45 spin_lock_init(&desc->lock);
46 desc->cpu = cpu; 46 desc->cpu = cpu;
47 lockdep_set_class(&desc->lock, &irq_desc_lock_class); 47 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
48 init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); 48 init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids);
49 init_copy_desc_masks(old_desc, desc);
49 arch_init_copy_chip_data(old_desc, desc, cpu); 50 arch_init_copy_chip_data(old_desc, desc, cpu);
51 return true;
50} 52}
51 53
52static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) 54static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
@@ -76,12 +78,18 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
76 node = cpu_to_node(cpu); 78 node = cpu_to_node(cpu);
77 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); 79 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
78 if (!desc) { 80 if (!desc) {
79 printk(KERN_ERR "irq %d: can not get new irq_desc for migration.\n", irq); 81 printk(KERN_ERR "irq %d: can not get new irq_desc "
82 "for migration.\n", irq);
83 /* still use old one */
84 desc = old_desc;
85 goto out_unlock;
86 }
87 if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) {
80 /* still use old one */ 88 /* still use old one */
89 kfree(desc);
81 desc = old_desc; 90 desc = old_desc;
82 goto out_unlock; 91 goto out_unlock;
83 } 92 }
84 init_copy_one_irq_desc(irq, old_desc, desc, cpu);
85 93
86 irq_desc_ptrs[irq] = desc; 94 irq_desc_ptrs[irq] = desc;
87 spin_unlock_irqrestore(&sparse_irq_lock, flags); 95 spin_unlock_irqrestore(&sparse_irq_lock, flags);
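
init_copy_one_irq_desc() now returns bool so that a failed per-descriptor cpumask allocation aborts the migration cleanly: the half-built copy is kfree()d and irq_desc_ptrs[irq] keeps pointing at the old descriptor, mirroring the existing fallback for a failed kzalloc_node(). A user-space sketch of this fall-back-to-the-old-object pattern (names and sizes are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc { int data; char *mask; };

static int init_copy(struct desc *new, const struct desc *old)
{
	*new = *old;			/* bulk copy first... */
	new->mask = malloc(64);		/* ...then the step that can fail */
	if (!new->mask)
		return 0;
	memcpy(new->mask, old->mask, 64);
	return 1;
}

static struct desc *move_desc(struct desc *old)
{
	struct desc *new = malloc(sizeof(*new));

	if (!new)
		return old;		/* still use old one */
	if (!init_copy(new, old)) {
		free(new);		/* discard the half-built copy */
		return old;		/* still use old one */
	}
	return new;
}

int main(void)
{
	char bits[64] = "cpumask";
	struct desc old = { 42, bits };
	struct desc *cur = move_desc(&old);

	printf("data=%d, fell back: %s\n", cur->data,
	       cur == &old ? "yes" : "no");
	if (cur != &old) {
		free(cur->mask);
		free(cur);
	}
	return 0;
}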
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
new file mode 100644
index 000000000000..638d8bedec14
--- /dev/null
+++ b/kernel/irq/pm.c
@@ -0,0 +1,79 @@
1/*
2 * linux/kernel/irq/pm.c
3 *
4 * Copyright (C) 2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
5 *
6 * This file contains power management functions related to interrupts.
7 */
8
9#include <linux/irq.h>
10#include <linux/module.h>
11#include <linux/interrupt.h>
12
13#include "internals.h"
14
15/**
16 * suspend_device_irqs - disable all currently enabled interrupt lines
17 *
 18 * During system-wide suspend or hibernation, device interrupts need to be
19 * disabled at the chip level and this function is provided for this purpose.
20 * It disables all interrupt lines that are enabled at the moment and sets the
21 * IRQ_SUSPENDED flag for them.
22 */
23void suspend_device_irqs(void)
24{
25 struct irq_desc *desc;
26 int irq;
27
28 for_each_irq_desc(irq, desc) {
29 unsigned long flags;
30
31 spin_lock_irqsave(&desc->lock, flags);
32 __disable_irq(desc, irq, true);
33 spin_unlock_irqrestore(&desc->lock, flags);
34 }
35
36 for_each_irq_desc(irq, desc)
37 if (desc->status & IRQ_SUSPENDED)
38 synchronize_irq(irq);
39}
40EXPORT_SYMBOL_GPL(suspend_device_irqs);
41
42/**
43 * resume_device_irqs - enable interrupt lines disabled by suspend_device_irqs()
44 *
45 * Enable all interrupt lines previously disabled by suspend_device_irqs() that
46 * have the IRQ_SUSPENDED flag set.
47 */
48void resume_device_irqs(void)
49{
50 struct irq_desc *desc;
51 int irq;
52
53 for_each_irq_desc(irq, desc) {
54 unsigned long flags;
55
56 if (!(desc->status & IRQ_SUSPENDED))
57 continue;
58
59 spin_lock_irqsave(&desc->lock, flags);
60 __enable_irq(desc, irq, true);
61 spin_unlock_irqrestore(&desc->lock, flags);
62 }
63}
64EXPORT_SYMBOL_GPL(resume_device_irqs);
65
66/**
67 * check_wakeup_irqs - check if any wake-up interrupts are pending
68 */
69int check_wakeup_irqs(void)
70{
71 struct irq_desc *desc;
72 int irq;
73
74 for_each_irq_desc(irq, desc)
75 if ((desc->status & IRQ_WAKEUP) && (desc->status & IRQ_PENDING))
76 return -EBUSY;
77
78 return 0;
79}
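
suspend_device_irqs() deliberately works in two passes: the first loop disables every line and sets IRQ_SUSPENDED under the descriptor lock, the second calls synchronize_irq() with no locks held so in-flight handlers can drain. Because resume_device_irqs() only touches lines carrying IRQ_SUSPENDED, a line a driver had disabled before suspend stays disabled across the cycle. A hedged sketch of how a sleep path might bracket its work with the new helpers; enter_sleep_state() is a hypothetical platform hook, stubbed out here:

#include <linux/interrupt.h>

static int enter_sleep_state(void)	/* hypothetical platform hook */
{
	return 0;
}

static int platform_enter_sleep(void)
{
	int error;

	suspend_device_irqs();		/* disable + flag, then drain */
	error = check_wakeup_irqs();	/* -EBUSY if a wake IRQ is pending */
	if (!error)
		error = enter_sleep_state();
	resume_device_irqs();		/* re-enable IRQ_SUSPENDED lines only */
	return error;
}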
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index aae3f742bcec..692363dd591f 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -20,11 +20,11 @@ static struct proc_dir_entry *root_irq_dir;
20static int irq_affinity_proc_show(struct seq_file *m, void *v) 20static int irq_affinity_proc_show(struct seq_file *m, void *v)
21{ 21{
22 struct irq_desc *desc = irq_to_desc((long)m->private); 22 struct irq_desc *desc = irq_to_desc((long)m->private);
23 const struct cpumask *mask = &desc->affinity; 23 const struct cpumask *mask = desc->affinity;
24 24
25#ifdef CONFIG_GENERIC_PENDING_IRQ 25#ifdef CONFIG_GENERIC_PENDING_IRQ
26 if (desc->status & IRQ_MOVE_PENDING) 26 if (desc->status & IRQ_MOVE_PENDING)
27 mask = &desc->pending_mask; 27 mask = desc->pending_mask;
28#endif 28#endif
29 seq_cpumask(m, mask); 29 seq_cpumask(m, mask);
30 seq_putc(m, '\n'); 30 seq_putc(m, '\n');
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56e..4d568294de3e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
104 return ok; 104 return ok;
105} 105}
106 106
107static void poll_spurious_irqs(unsigned long dummy) 107static void poll_all_shared_irqs(void)
108{ 108{
109 struct irq_desc *desc; 109 struct irq_desc *desc;
110 int i; 110 int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
123 123
124 try_one_irq(i, desc); 124 try_one_irq(i, desc);
125 } 125 }
126}
127
128static void poll_spurious_irqs(unsigned long dummy)
129{
130 poll_all_shared_irqs();
126 131
127 mod_timer(&poll_spurious_irq_timer, 132 mod_timer(&poll_spurious_irq_timer,
128 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 133 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
129} 134}
130 135
136#ifdef CONFIG_DEBUG_SHIRQ
137void debug_poll_all_shared_irqs(void)
138{
139 poll_all_shared_irqs();
140}
141#endif
142
131/* 143/*
132 * If 99,900 of the previous 100,000 interrupts have not been handled 144 * If 99,900 of the previous 100,000 interrupts have not been handled
133 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 145 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 483899578259..5a758c6e4950 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -42,7 +42,7 @@
42note_buf_t* crash_notes; 42note_buf_t* crash_notes;
43 43
44/* vmcoreinfo stuff */ 44/* vmcoreinfo stuff */
45unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; 45static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];
46u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; 46u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
47size_t vmcoreinfo_size; 47size_t vmcoreinfo_size;
48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); 48size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
@@ -1130,7 +1130,7 @@ void crash_save_cpu(struct pt_regs *regs, int cpu)
1130 return; 1130 return;
1131 memset(&prstatus, 0, sizeof(prstatus)); 1131 memset(&prstatus, 0, sizeof(prstatus));
1132 prstatus.pr_pid = current->pid; 1132 prstatus.pr_pid = current->pid;
1133 elf_core_copy_regs(&prstatus.pr_reg, regs); 1133 elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
1134 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, 1134 buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
1135 &prstatus, sizeof(prstatus)); 1135 &prstatus, sizeof(prstatus));
1136 final_note(buf); 1136 final_note(buf);
@@ -1409,6 +1409,7 @@ static int __init crash_save_vmcoreinfo_init(void)
1409 VMCOREINFO_OFFSET(list_head, prev); 1409 VMCOREINFO_OFFSET(list_head, prev);
1410 VMCOREINFO_OFFSET(vm_struct, addr); 1410 VMCOREINFO_OFFSET(vm_struct, addr);
1411 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); 1411 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1412 log_buf_kexec_setup();
1412 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); 1413 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1413 VMCOREINFO_NUMBER(NR_FREE_PAGES); 1414 VMCOREINFO_NUMBER(NR_FREE_PAGES);
1414 VMCOREINFO_NUMBER(PG_lru); 1415 VMCOREINFO_NUMBER(PG_lru);
@@ -1450,11 +1451,7 @@ int kernel_kexec(void)
1450 error = device_suspend(PMSG_FREEZE); 1451 error = device_suspend(PMSG_FREEZE);
1451 if (error) 1452 if (error)
1452 goto Resume_console; 1453 goto Resume_console;
1453 error = disable_nonboot_cpus();
1454 if (error)
1455 goto Resume_devices;
1456 device_pm_lock(); 1454 device_pm_lock();
1457 local_irq_disable();
1458 /* At this point, device_suspend() has been called, 1455 /* At this point, device_suspend() has been called,
1459 * but *not* device_power_down(). We *must* 1456 * but *not* device_power_down(). We *must*
1460 * device_power_down() now. Otherwise, drivers for 1457 * device_power_down() now. Otherwise, drivers for
@@ -1464,12 +1461,15 @@ int kernel_kexec(void)
1464 */ 1461 */
1465 error = device_power_down(PMSG_FREEZE); 1462 error = device_power_down(PMSG_FREEZE);
1466 if (error) 1463 if (error)
1467 goto Enable_irqs; 1464 goto Resume_devices;
1468 1465 error = disable_nonboot_cpus();
1466 if (error)
1467 goto Enable_cpus;
1468 local_irq_disable();
1469 /* Suspend system devices */ 1469 /* Suspend system devices */
1470 error = sysdev_suspend(PMSG_FREEZE); 1470 error = sysdev_suspend(PMSG_FREEZE);
1471 if (error) 1471 if (error)
1472 goto Power_up_devices; 1472 goto Enable_irqs;
1473 } else 1473 } else
1474#endif 1474#endif
1475 { 1475 {
@@ -1483,13 +1483,13 @@ int kernel_kexec(void)
1483#ifdef CONFIG_KEXEC_JUMP 1483#ifdef CONFIG_KEXEC_JUMP
1484 if (kexec_image->preserve_context) { 1484 if (kexec_image->preserve_context) {
1485 sysdev_resume(); 1485 sysdev_resume();
1486 Power_up_devices:
1487 device_power_up(PMSG_RESTORE);
1488 Enable_irqs: 1486 Enable_irqs:
1489 local_irq_enable(); 1487 local_irq_enable();
1490 device_pm_unlock(); 1488 Enable_cpus:
1491 enable_nonboot_cpus(); 1489 enable_nonboot_cpus();
1490 device_power_up(PMSG_RESTORE);
1492 Resume_devices: 1491 Resume_devices:
1492 device_pm_unlock();
1493 device_resume(PMSG_RESTORE); 1493 device_resume(PMSG_RESTORE);
1494 Resume_console: 1494 Resume_console:
1495 resume_console(); 1495 resume_console();
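
The kexec-jump reordering moves device_power_down() ahead of disable_nonboot_cpus() and local_irq_disable(), and reshuffles the error labels so teardown runs in exactly the reverse order of setup; note that a failed disable_nonboot_cpus() still goes through enable_nonboot_cpus(), since the disable may have partially succeeded. A standalone sketch of that reverse-order goto unwinding (step names are illustrative):

#include <stdio.h>

static int do_step(const char *name, int fail)
{
	printf("do   %s\n", name);
	return fail ? -1 : 0;
}

static void undo_step(const char *name)
{
	printf("undo %s\n", name);
}

static int enter_and_leave(void)
{
	int error;

	error = do_step("device_power_down", 0);
	if (error)
		goto out;
	error = do_step("disable_nonboot_cpus", 0);
	if (error)
		goto enable_cpus;	/* partial disable: still re-enable */
	error = do_step("irq_disable + sysdev_suspend", 1);	/* simulate failure */
	if (error)
		goto enable_irqs;

	/* the preserved-context jump would happen here, then resume: */
enable_irqs:
	undo_step("local_irq_enable");
enable_cpus:
	undo_step("enable_nonboot_cpus");
	undo_step("device_power_up");
out:
	return error;
}

int main(void)
{
	return enter_and_leave() ? 1 : 0;
}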
diff --git a/kernel/kmod.c b/kernel/kmod.c
index a27a5f64443d..f0c8f545180d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -167,7 +167,7 @@ static int ____call_usermodehelper(void *data)
167 } 167 }
168 168
169 /* We can run anywhere, unlike our parent keventd(). */ 169 /* We can run anywhere, unlike our parent keventd(). */
170 set_cpus_allowed_ptr(current, CPU_MASK_ALL_PTR); 170 set_cpus_allowed_ptr(current, cpu_all_mask);
171 171
172 /* 172 /*
173 * Our parent is keventd, which runs with elevated scheduling priority. 173 * Our parent is keventd, which runs with elevated scheduling priority.
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 4fbc456f393d..84bbadd4d021 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -110,7 +110,7 @@ static void create_kthread(struct kthread_create_info *create)
110 */ 110 */
111 sched_setscheduler(create->result, SCHED_NORMAL, &param); 111 sched_setscheduler(create->result, SCHED_NORMAL, &param);
112 set_user_nice(create->result, KTHREAD_NICE_LEVEL); 112 set_user_nice(create->result, KTHREAD_NICE_LEVEL);
113 set_cpus_allowed_ptr(create->result, CPU_MASK_ALL_PTR); 113 set_cpus_allowed_ptr(create->result, cpu_all_mask);
114 } 114 }
115 complete(&create->done); 115 complete(&create->done);
116} 116}
@@ -240,7 +240,7 @@ int kthreadd(void *unused)
240 set_task_comm(tsk, "kthreadd"); 240 set_task_comm(tsk, "kthreadd");
241 ignore_signals(tsk); 241 ignore_signals(tsk);
242 set_user_nice(tsk, KTHREAD_NICE_LEVEL); 242 set_user_nice(tsk, KTHREAD_NICE_LEVEL);
243 set_cpus_allowed_ptr(tsk, CPU_MASK_ALL_PTR); 243 set_cpus_allowed_ptr(tsk, cpu_all_mask);
244 244
245 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG; 245 current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;
246 246
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 449db466bdbc..ca07c5c0c914 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -9,6 +9,44 @@
9 * as published by the Free Software Foundation; version 2 9 * as published by the Free Software Foundation; version 2
10 * of the License. 10 * of the License.
11 */ 11 */
12
13/*
14 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
15 * used by the "latencytop" userspace tool. The latency that is tracked is not
16 * the 'traditional' interrupt latency (which is primarily caused by something
17 * else consuming CPU), but instead, it is the latency an application encounters
18 * because the kernel sleeps on its behalf for various reasons.
19 *
20 * This code tracks 2 levels of statistics:
21 * 1) System level latency
22 * 2) Per process latency
23 *
 24 * The latency is stored in fixed-size data structures in an accumulated form;
 25 * if the "same" latency cause is hit twice, this will be tracked as one entry
 26 * in the data structure. The count, total accumulated latency and maximum
 27 * latency are all tracked in this data structure. When the fixed-size structure is
 28 * full, no new causes are tracked until the buffer is flushed by writing to
29 * the /proc file; the userspace tool does this on a regular basis.
30 *
31 * A latency cause is identified by a stringified backtrace at the point that
32 * the scheduler gets invoked. The userland tool will use this string to
33 * identify the cause of the latency in human readable form.
34 *
35 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
36 * These files look like this:
37 *
38 * Latency Top version : v0.1
39 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
40 * | | | |
41 * | | | +----> the stringified backtrace
42 * | | +---------> The maximum latency for this entry in microseconds
43 * | +--------------> The accumulated latency for this entry (microseconds)
44 * +-------------------> The number of times this entry is hit
45 *
46 * (note: the average latency is the accumulated latency divided by the number
47 * of times)
48 */
49
12#include <linux/latencytop.h> 50#include <linux/latencytop.h>
13#include <linux/kallsyms.h> 51#include <linux/kallsyms.h>
14#include <linux/seq_file.h> 52#include <linux/seq_file.h>
@@ -72,7 +110,7 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
72 firstnonnull = i; 110 firstnonnull = i;
73 continue; 111 continue;
74 } 112 }
75 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 113 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
76 unsigned long record = lat->backtrace[q]; 114 unsigned long record = lat->backtrace[q];
77 115
78 if (latency_record[i].backtrace[q] != record) { 116 if (latency_record[i].backtrace[q] != record) {
@@ -101,31 +139,52 @@ account_global_scheduler_latency(struct task_struct *tsk, struct latency_record
101 memcpy(&latency_record[i], lat, sizeof(struct latency_record)); 139 memcpy(&latency_record[i], lat, sizeof(struct latency_record));
102} 140}
103 141
104static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) 142/*
 143 * Helper to store a backtrace into a latency record entry
144 */
145static inline void store_stacktrace(struct task_struct *tsk,
146 struct latency_record *lat)
105{ 147{
106 struct stack_trace trace; 148 struct stack_trace trace;
107 149
108 memset(&trace, 0, sizeof(trace)); 150 memset(&trace, 0, sizeof(trace));
109 trace.max_entries = LT_BACKTRACEDEPTH; 151 trace.max_entries = LT_BACKTRACEDEPTH;
110 trace.entries = &lat->backtrace[0]; 152 trace.entries = &lat->backtrace[0];
111 trace.skip = 0;
112 save_stack_trace_tsk(tsk, &trace); 153 save_stack_trace_tsk(tsk, &trace);
113} 154}
114 155
156/**
 157 * __account_scheduler_latency - record an occurred latency
158 * @tsk - the task struct of the task hitting the latency
159 * @usecs - the duration of the latency in microseconds
160 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
161 *
162 * This function is the main entry point for recording latency entries
163 * as called by the scheduler.
164 *
165 * This function has a few special cases to deal with normal 'non-latency'
166 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
167 * since this usually is caused by waiting for events via select() and co.
168 *
169 * Negative latencies (caused by time going backwards) are also explicitly
170 * skipped.
171 */
115void __sched 172void __sched
116account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) 173__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
117{ 174{
118 unsigned long flags; 175 unsigned long flags;
119 int i, q; 176 int i, q;
120 struct latency_record lat; 177 struct latency_record lat;
121 178
122 if (!latencytop_enabled)
123 return;
124
125 /* Long interruptible waits are generally user requested... */ 179 /* Long interruptible waits are generally user requested... */
126 if (inter && usecs > 5000) 180 if (inter && usecs > 5000)
127 return; 181 return;
128 182
183 /* Negative sleeps are time going backwards */
184 /* Zero-time sleeps are non-interesting */
185 if (usecs <= 0)
186 return;
187
129 memset(&lat, 0, sizeof(lat)); 188 memset(&lat, 0, sizeof(lat));
130 lat.count = 1; 189 lat.count = 1;
131 lat.time = usecs; 190 lat.time = usecs;
@@ -143,12 +202,12 @@ account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
143 if (tsk->latency_record_count >= LT_SAVECOUNT) 202 if (tsk->latency_record_count >= LT_SAVECOUNT)
144 goto out_unlock; 203 goto out_unlock;
145 204
146 for (i = 0; i < LT_SAVECOUNT ; i++) { 205 for (i = 0; i < LT_SAVECOUNT; i++) {
147 struct latency_record *mylat; 206 struct latency_record *mylat;
148 int same = 1; 207 int same = 1;
149 208
150 mylat = &tsk->latency_record[i]; 209 mylat = &tsk->latency_record[i];
151 for (q = 0 ; q < LT_BACKTRACEDEPTH ; q++) { 210 for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
152 unsigned long record = lat.backtrace[q]; 211 unsigned long record = lat.backtrace[q];
153 212
154 if (mylat->backtrace[q] != record) { 213 if (mylat->backtrace[q] != record) {
@@ -186,7 +245,7 @@ static int lstats_show(struct seq_file *m, void *v)
186 for (i = 0; i < MAXLR; i++) { 245 for (i = 0; i < MAXLR; i++) {
187 if (latency_record[i].backtrace[0]) { 246 if (latency_record[i].backtrace[0]) {
188 int q; 247 int q;
189 seq_printf(m, "%i %li %li ", 248 seq_printf(m, "%i %lu %lu ",
190 latency_record[i].count, 249 latency_record[i].count,
191 latency_record[i].time, 250 latency_record[i].time,
192 latency_record[i].max); 251 latency_record[i].max);
@@ -223,7 +282,7 @@ static int lstats_open(struct inode *inode, struct file *filp)
223 return single_open(filp, lstats_show, NULL); 282 return single_open(filp, lstats_show, NULL);
224} 283}
225 284
226static struct file_operations lstats_fops = { 285static const struct file_operations lstats_fops = {
227 .open = lstats_open, 286 .open = lstats_open,
228 .read = seq_read, 287 .read = seq_read,
229 .write = lstats_write, 288 .write = lstats_write,
@@ -236,4 +295,4 @@ static int __init init_lstats_procfs(void)
236 proc_create("latency_stats", 0644, NULL, &lstats_fops); 295 proc_create("latency_stats", 0644, NULL, &lstats_fops);
237 return 0; 296 return 0;
238} 297}
239__initcall(init_lstats_procfs); 298device_initcall(init_lstats_procfs);
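
Alongside the new file-head documentation, the key design to keep in mind is the fixed-size accumulation: the backtrace acts as the key, identical causes share one slot whose count, total and maximum get updated, and once every slot is taken new causes are simply dropped until userspace flushes the buffer. A user-space sketch of that scheme with the key reduced to a single integer (names are illustrative):

#include <stdio.h>

#define MAXLR 8

struct record {
	unsigned long key, count, total, max;
};

static struct record records[MAXLR];

static void account(unsigned long key, unsigned long usecs)
{
	int i, free_slot = -1;

	for (i = 0; i < MAXLR; i++) {
		if (records[i].count && records[i].key == key) {
			records[i].count++;		/* same cause: accumulate */
			records[i].total += usecs;
			if (usecs > records[i].max)
				records[i].max = usecs;
			return;
		}
		if (!records[i].count && free_slot < 0)
			free_slot = i;
	}
	if (free_slot < 0)
		return;			/* table full: drop until flushed */
	records[free_slot] = (struct record){ key, 1, usecs, usecs };
}

int main(void)
{
	account(0xdead, 100);
	account(0xdead, 250);
	account(0xbeef, 40);
	printf("slot0: count=%lu total=%lu max=%lu\n",
	       records[0].count, records[0].total, records[0].max);
	return 0;
}

Run as-is this prints count=2 total=350 max=250, i.e. the "number of times / accumulated / maximum" columns of /proc/latency_stats.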
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..981cd4854281 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -41,6 +41,7 @@
41#include <linux/utsname.h> 41#include <linux/utsname.h>
42#include <linux/hash.h> 42#include <linux/hash.h>
43#include <linux/ftrace.h> 43#include <linux/ftrace.h>
44#include <linux/stringify.h>
44 45
45#include <asm/sections.h> 46#include <asm/sections.h>
46 47
@@ -310,12 +311,14 @@ EXPORT_SYMBOL(lockdep_on);
310#if VERBOSE 311#if VERBOSE
311# define HARDIRQ_VERBOSE 1 312# define HARDIRQ_VERBOSE 1
312# define SOFTIRQ_VERBOSE 1 313# define SOFTIRQ_VERBOSE 1
314# define RECLAIM_VERBOSE 1
313#else 315#else
314# define HARDIRQ_VERBOSE 0 316# define HARDIRQ_VERBOSE 0
315# define SOFTIRQ_VERBOSE 0 317# define SOFTIRQ_VERBOSE 0
318# define RECLAIM_VERBOSE 0
316#endif 319#endif
317 320
318#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE 321#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
319/* 322/*
320 * Quick filtering for interesting events: 323 * Quick filtering for interesting events:
321 */ 324 */
@@ -430,30 +433,24 @@ atomic_t nr_find_usage_forwards_checks;
430atomic_t nr_find_usage_forwards_recursions; 433atomic_t nr_find_usage_forwards_recursions;
431atomic_t nr_find_usage_backwards_checks; 434atomic_t nr_find_usage_backwards_checks;
432atomic_t nr_find_usage_backwards_recursions; 435atomic_t nr_find_usage_backwards_recursions;
433# define debug_atomic_inc(ptr) atomic_inc(ptr)
434# define debug_atomic_dec(ptr) atomic_dec(ptr)
435# define debug_atomic_read(ptr) atomic_read(ptr)
436#else
437# define debug_atomic_inc(ptr) do { } while (0)
438# define debug_atomic_dec(ptr) do { } while (0)
439# define debug_atomic_read(ptr) 0
440#endif 436#endif
441 437
442/* 438/*
443 * Locking printouts: 439 * Locking printouts:
444 */ 440 */
445 441
442#define __USAGE(__STATE) \
443 [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \
444 [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \
445 [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
446 [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
447
446static const char *usage_str[] = 448static const char *usage_str[] =
447{ 449{
448 [LOCK_USED] = "initial-use ", 450#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
449 [LOCK_USED_IN_HARDIRQ] = "in-hardirq-W", 451#include "lockdep_states.h"
450 [LOCK_USED_IN_SOFTIRQ] = "in-softirq-W", 452#undef LOCKDEP_STATE
451 [LOCK_ENABLED_SOFTIRQS] = "softirq-on-W", 453 [LOCK_USED] = "INITIAL USE",
452 [LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
453 [LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
454 [LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
455 [LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
456 [LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
457}; 454};
458 455
459const char * __get_key_name(struct lockdep_subclass_key *key, char *str) 456const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -461,46 +458,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
461 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); 458 return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
462} 459}
463 460
464void 461static inline unsigned long lock_flag(enum lock_usage_bit bit)
465get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
466{ 462{
467 *c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.'; 463 return 1UL << bit;
468 464}
469 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
470 *c1 = '+';
471 else
472 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
473 *c1 = '-';
474 465
475 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) 466static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
476 *c2 = '+'; 467{
477 else 468 char c = '.';
478 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
479 *c2 = '-';
480 469
481 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 470 if (class->usage_mask & lock_flag(bit + 2))
482 *c3 = '-'; 471 c = '+';
483 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) { 472 if (class->usage_mask & lock_flag(bit)) {
484 *c3 = '+'; 473 c = '-';
485 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 474 if (class->usage_mask & lock_flag(bit + 2))
486 *c3 = '?'; 475 c = '?';
487 } 476 }
488 477
489 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) 478 return c;
490 *c4 = '-'; 479}
491 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) { 480
492 *c4 = '+'; 481void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
493 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) 482{
494 *c4 = '?'; 483 int i = 0;
495 } 484
485#define LOCKDEP_STATE(__STATE) \
486 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \
487 usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
488#include "lockdep_states.h"
489#undef LOCKDEP_STATE
490
491 usage[i] = '\0';
496} 492}
497 493
498static void print_lock_name(struct lock_class *class) 494static void print_lock_name(struct lock_class *class)
499{ 495{
500 char str[KSYM_NAME_LEN], c1, c2, c3, c4; 496 char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
501 const char *name; 497 const char *name;
502 498
503 get_usage_chars(class, &c1, &c2, &c3, &c4); 499 get_usage_chars(class, usage);
504 500
505 name = class->name; 501 name = class->name;
506 if (!name) { 502 if (!name) {
@@ -513,7 +509,7 @@ static void print_lock_name(struct lock_class *class)
513 if (class->subclass) 509 if (class->subclass)
514 printk("/%d", class->subclass); 510 printk("/%d", class->subclass);
515 } 511 }
516 printk("){%c%c%c%c}", c1, c2, c3, c4); 512 printk("){%s}", usage);
517} 513}
518 514
519static void print_lockdep_cache(struct lockdep_map *lock) 515static void print_lockdep_cache(struct lockdep_map *lock)
@@ -1263,9 +1259,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
1263 bit_backwards, bit_forwards, irqclass); 1259 bit_backwards, bit_forwards, irqclass);
1264} 1260}
1265 1261
1266static int 1262static const char *state_names[] = {
1267check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, 1263#define LOCKDEP_STATE(__STATE) \
1268 struct held_lock *next) 1264 __stringify(__STATE),
1265#include "lockdep_states.h"
1266#undef LOCKDEP_STATE
1267};
1268
1269static const char *state_rnames[] = {
1270#define LOCKDEP_STATE(__STATE) \
1271 __stringify(__STATE)"-READ",
1272#include "lockdep_states.h"
1273#undef LOCKDEP_STATE
1274};
1275
1276static inline const char *state_name(enum lock_usage_bit bit)
1277{
1278 return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
1279}
1280
1281static int exclusive_bit(int new_bit)
1282{
1283 /*
1284 * USED_IN
1285 * USED_IN_READ
1286 * ENABLED
1287 * ENABLED_READ
1288 *
1289 * bit 0 - write/read
1290 * bit 1 - used_in/enabled
1291 * bit 2+ state
1292 */
1293
1294 int state = new_bit & ~3;
1295 int dir = new_bit & 2;
1296
1297 /*
1298 * keep state, bit flip the direction and strip read.
1299 */
1300 return state | (dir ^ 2);
1301}
1302
1303static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1304 struct held_lock *next, enum lock_usage_bit bit)
1269{ 1305{
1270 /* 1306 /*
1271 * Prove that the new dependency does not connect a hardirq-safe 1307 * Prove that the new dependency does not connect a hardirq-safe
@@ -1273,38 +1309,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1273 * the backwards-subgraph starting at <prev>, and the 1309 * the backwards-subgraph starting at <prev>, and the
1274 * forwards-subgraph starting at <next>: 1310 * forwards-subgraph starting at <next>:
1275 */ 1311 */
1276 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ, 1312 if (!check_usage(curr, prev, next, bit,
1277 LOCK_ENABLED_HARDIRQS, "hard")) 1313 exclusive_bit(bit), state_name(bit)))
1278 return 0; 1314 return 0;
1279 1315
1316 bit++; /* _READ */
1317
1280 /* 1318 /*
1281 * Prove that the new dependency does not connect a hardirq-safe-read 1319 * Prove that the new dependency does not connect a hardirq-safe-read
1282 * lock with a hardirq-unsafe lock - to achieve this we search 1320 * lock with a hardirq-unsafe lock - to achieve this we search
1283 * the backwards-subgraph starting at <prev>, and the 1321 * the backwards-subgraph starting at <prev>, and the
1284 * forwards-subgraph starting at <next>: 1322 * forwards-subgraph starting at <next>:
1285 */ 1323 */
1286 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ, 1324 if (!check_usage(curr, prev, next, bit,
1287 LOCK_ENABLED_HARDIRQS, "hard-read")) 1325 exclusive_bit(bit), state_name(bit)))
1288 return 0; 1326 return 0;
1289 1327
1290 /* 1328 return 1;
1291 * Prove that the new dependency does not connect a softirq-safe 1329}
1292 * lock with a softirq-unsafe lock - to achieve this we search 1330
1293 * the backwards-subgraph starting at <prev>, and the 1331static int
1294 * forwards-subgraph starting at <next>: 1332check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1295 */ 1333 struct held_lock *next)
1296 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ, 1334{
1297 LOCK_ENABLED_SOFTIRQS, "soft")) 1335#define LOCKDEP_STATE(__STATE) \
1298 return 0; 1336 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1299 /*
1300 * Prove that the new dependency does not connect a softirq-safe-read
1301 * lock with a softirq-unsafe lock - to achieve this we search
1302 * the backwards-subgraph starting at <prev>, and the
1303 * forwards-subgraph starting at <next>:
1304 */
1305 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1306 LOCK_ENABLED_SOFTIRQS, "soft"))
1307 return 0; 1337 return 0;
1338#include "lockdep_states.h"
1339#undef LOCKDEP_STATE
1308 1340
1309 return 1; 1341 return 1;
1310} 1342}
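
The collapse of check_prev_add_irq() works because the usage bits now have an arithmetic layout: each state occupies four consecutive values in the order USED_IN, USED_IN_READ, ENABLED, ENABLED_READ, so bit 0 is the read flag, bit 1 the used_in/enabled direction, and bits 2 and up the state index. exclusive_bit() keeps the state, flips the direction and strips the read flag: the conflicting counterpart of IN-HARDIRQ-R is HARDIRQ-ON-W. A standalone check of that layout (HARDIRQ only, values as generated by the enum in lockdep_internals.h):

#include <assert.h>
#include <stdio.h>

enum {
	LOCK_USED_IN_HARDIRQ,		/* 0 */
	LOCK_USED_IN_HARDIRQ_READ,	/* 1 */
	LOCK_ENABLED_HARDIRQ,		/* 2 */
	LOCK_ENABLED_HARDIRQ_READ,	/* 3 */
};

static int exclusive_bit(int new_bit)
{
	int state = new_bit & ~3;	/* bits 2+ select the state */
	int dir = new_bit & 2;		/* bit 1: used_in (0) vs enabled (2) */

	/* keep state, flip the direction, strip the read bit */
	return state | (dir ^ 2);
}

int main(void)
{
	assert(exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ);
	assert(exclusive_bit(LOCK_ENABLED_HARDIRQ_READ) == LOCK_USED_IN_HARDIRQ);
	printf("bit layout checks pass\n");
	return 0;
}

The bit++ between the two check_usage() calls in check_irq_usage() is the same trick: adding 1 turns a write bit into its _READ sibling.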
@@ -1861,9 +1893,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1861 curr->comm, task_pid_nr(curr)); 1893 curr->comm, task_pid_nr(curr));
1862 print_lock(this); 1894 print_lock(this);
1863 if (forwards) 1895 if (forwards)
1864 printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass); 1896 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
1865 else 1897 else
1866 printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass); 1898 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
1867 print_lock_name(other); 1899 print_lock_name(other);
1868 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 1900 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1869 1901
@@ -1933,7 +1965,7 @@ void print_irqtrace_events(struct task_struct *curr)
1933 print_ip_sym(curr->softirq_disable_ip); 1965 print_ip_sym(curr->softirq_disable_ip);
1934} 1966}
1935 1967
1936static int hardirq_verbose(struct lock_class *class) 1968static int HARDIRQ_verbose(struct lock_class *class)
1937{ 1969{
1938#if HARDIRQ_VERBOSE 1970#if HARDIRQ_VERBOSE
1939 return class_filter(class); 1971 return class_filter(class);
@@ -1941,7 +1973,7 @@ static int hardirq_verbose(struct lock_class *class)
1941 return 0; 1973 return 0;
1942} 1974}
1943 1975
1944static int softirq_verbose(struct lock_class *class) 1976static int SOFTIRQ_verbose(struct lock_class *class)
1945{ 1977{
1946#if SOFTIRQ_VERBOSE 1978#if SOFTIRQ_VERBOSE
1947 return class_filter(class); 1979 return class_filter(class);
@@ -1949,185 +1981,95 @@ static int softirq_verbose(struct lock_class *class)
1949 return 0; 1981 return 0;
1950} 1982}
1951 1983
1984static int RECLAIM_FS_verbose(struct lock_class *class)
1985{
1986#if RECLAIM_VERBOSE
1987 return class_filter(class);
1988#endif
1989 return 0;
1990}
1991
1952#define STRICT_READ_CHECKS 1 1992#define STRICT_READ_CHECKS 1
1953 1993
1954static int mark_lock_irq(struct task_struct *curr, struct held_lock *this, 1994static int (*state_verbose_f[])(struct lock_class *class) = {
1995#define LOCKDEP_STATE(__STATE) \
1996 __STATE##_verbose,
1997#include "lockdep_states.h"
1998#undef LOCKDEP_STATE
1999};
2000
2001static inline int state_verbose(enum lock_usage_bit bit,
2002 struct lock_class *class)
2003{
2004 return state_verbose_f[bit >> 2](class);
2005}
2006
2007typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
2008 enum lock_usage_bit bit, const char *name);
2009
2010static int
2011mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1955 enum lock_usage_bit new_bit) 2012 enum lock_usage_bit new_bit)
1956{ 2013{
1957 int ret = 1; 2014 int excl_bit = exclusive_bit(new_bit);
2015 int read = new_bit & 1;
2016 int dir = new_bit & 2;
1958 2017
1959 switch(new_bit) { 2018 /*
1960 case LOCK_USED_IN_HARDIRQ: 2019 * mark USED_IN has to look forwards -- to ensure no dependency
1961 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) 2020 * has ENABLED state, which would allow recursion deadlocks.
1962 return 0; 2021 *
1963 if (!valid_state(curr, this, new_bit, 2022 * mark ENABLED has to look backwards -- to ensure no dependee
1964 LOCK_ENABLED_HARDIRQS_READ)) 2023 * has USED_IN state, which, again, would allow recursion deadlocks.
1965 return 0; 2024 */
1966 /* 2025 check_usage_f usage = dir ?
1967 * just marked it hardirq-safe, check that this lock 2026 check_usage_backwards : check_usage_forwards;
1968 * took no hardirq-unsafe lock in the past: 2027
1969 */ 2028 /*
1970 if (!check_usage_forwards(curr, this, 2029 * Validate that this particular lock does not have conflicting
1971 LOCK_ENABLED_HARDIRQS, "hard")) 2030 * usage states.
1972 return 0; 2031 */
1973#if STRICT_READ_CHECKS 2032 if (!valid_state(curr, this, new_bit, excl_bit))
1974 /* 2033 return 0;
1975 * just marked it hardirq-safe, check that this lock 2034
1976 * took no hardirq-unsafe-read lock in the past: 2035 /*
1977 */ 2036 * Validate that the lock dependencies don't have conflicting usage
1978 if (!check_usage_forwards(curr, this, 2037 * states.
1979 LOCK_ENABLED_HARDIRQS_READ, "hard-read")) 2038 */
1980 return 0; 2039 if ((!read || !dir || STRICT_READ_CHECKS) &&
1981#endif 2040 !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
1982 if (hardirq_verbose(hlock_class(this))) 2041 return 0;
1983 ret = 2; 2042
1984 break; 2043 /*
1985 case LOCK_USED_IN_SOFTIRQ: 2044 * Check for read in write conflicts
1986 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS)) 2045 */
1987 return 0; 2046 if (!read) {
1988 if (!valid_state(curr, this, new_bit, 2047 if (!valid_state(curr, this, new_bit, excl_bit + 1))
1989 LOCK_ENABLED_SOFTIRQS_READ))
1990 return 0;
1991 /*
1992 * just marked it softirq-safe, check that this lock
1993 * took no softirq-unsafe lock in the past:
1994 */
1995 if (!check_usage_forwards(curr, this,
1996 LOCK_ENABLED_SOFTIRQS, "soft"))
1997 return 0;
1998#if STRICT_READ_CHECKS
1999 /*
2000 * just marked it softirq-safe, check that this lock
2001 * took no softirq-unsafe-read lock in the past:
2002 */
2003 if (!check_usage_forwards(curr, this,
2004 LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
2005 return 0;
2006#endif
2007 if (softirq_verbose(hlock_class(this)))
2008 ret = 2;
2009 break;
2010 case LOCK_USED_IN_HARDIRQ_READ:
2011 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
2012 return 0;
2013 /*
2014 * just marked it hardirq-read-safe, check that this lock
2015 * took no hardirq-unsafe lock in the past:
2016 */
2017 if (!check_usage_forwards(curr, this,
2018 LOCK_ENABLED_HARDIRQS, "hard"))
2019 return 0;
2020 if (hardirq_verbose(hlock_class(this)))
2021 ret = 2;
2022 break;
2023 case LOCK_USED_IN_SOFTIRQ_READ:
2024 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
2025 return 0;
2026 /*
2027 * just marked it softirq-read-safe, check that this lock
2028 * took no softirq-unsafe lock in the past:
2029 */
2030 if (!check_usage_forwards(curr, this,
2031 LOCK_ENABLED_SOFTIRQS, "soft"))
2032 return 0;
2033 if (softirq_verbose(hlock_class(this)))
2034 ret = 2;
2035 break;
2036 case LOCK_ENABLED_HARDIRQS:
2037 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2038 return 0;
2039 if (!valid_state(curr, this, new_bit,
2040 LOCK_USED_IN_HARDIRQ_READ))
2041 return 0;
2042 /*
2043 * just marked it hardirq-unsafe, check that no hardirq-safe
2044 * lock in the system ever took it in the past:
2045 */
2046 if (!check_usage_backwards(curr, this,
2047 LOCK_USED_IN_HARDIRQ, "hard"))
2048 return 0;
2049#if STRICT_READ_CHECKS
2050 /*
2051 * just marked it hardirq-unsafe, check that no
2052 * hardirq-safe-read lock in the system ever took
2053 * it in the past:
2054 */
2055 if (!check_usage_backwards(curr, this,
2056 LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
2057 return 0;
2058#endif
2059 if (hardirq_verbose(hlock_class(this)))
2060 ret = 2;
2061 break;
2062 case LOCK_ENABLED_SOFTIRQS:
2063 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2064 return 0;
2065 if (!valid_state(curr, this, new_bit,
2066 LOCK_USED_IN_SOFTIRQ_READ))
2067 return 0;
2068 /*
2069 * just marked it softirq-unsafe, check that no softirq-safe
2070 * lock in the system ever took it in the past:
2071 */
2072 if (!check_usage_backwards(curr, this,
2073 LOCK_USED_IN_SOFTIRQ, "soft"))
2074 return 0;
2075#if STRICT_READ_CHECKS
2076 /*
2077 * just marked it softirq-unsafe, check that no
2078 * softirq-safe-read lock in the system ever took
2079 * it in the past:
2080 */
2081 if (!check_usage_backwards(curr, this,
2082 LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
2083 return 0;
2084#endif
2085 if (softirq_verbose(hlock_class(this)))
2086 ret = 2;
2087 break;
2088 case LOCK_ENABLED_HARDIRQS_READ:
2089 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
2090 return 0;
2091#if STRICT_READ_CHECKS
2092 /*
2093 * just marked it hardirq-read-unsafe, check that no
2094 * hardirq-safe lock in the system ever took it in the past:
2095 */
2096 if (!check_usage_backwards(curr, this,
2097 LOCK_USED_IN_HARDIRQ, "hard"))
2098 return 0;
2099#endif
2100 if (hardirq_verbose(hlock_class(this)))
2101 ret = 2;
2102 break;
2103 case LOCK_ENABLED_SOFTIRQS_READ:
2104 if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
2105 return 0; 2048 return 0;
2106#if STRICT_READ_CHECKS 2049
2107 /* 2050 if (STRICT_READ_CHECKS &&
2108 * just marked it softirq-read-unsafe, check that no 2051 !usage(curr, this, excl_bit + 1,
2109 * softirq-safe lock in the system ever took it in the past: 2052 state_name(new_bit + 1)))
2110 */
2111 if (!check_usage_backwards(curr, this,
2112 LOCK_USED_IN_SOFTIRQ, "soft"))
2113 return 0; 2053 return 0;
2114#endif
2115 if (softirq_verbose(hlock_class(this)))
2116 ret = 2;
2117 break;
2118 default:
2119 WARN_ON(1);
2120 break;
2121 } 2054 }
2122 2055
2123 return ret; 2056 if (state_verbose(new_bit, hlock_class(this)))
2057 return 2;
2058
2059 return 1;
2124} 2060}
2125 2061
2062enum mark_type {
2063#define LOCKDEP_STATE(__STATE) __STATE,
2064#include "lockdep_states.h"
2065#undef LOCKDEP_STATE
2066};
2067
2126/* 2068/*
2127 * Mark all held locks with a usage bit: 2069 * Mark all held locks with a usage bit:
2128 */ 2070 */
2129static int 2071static int
2130mark_held_locks(struct task_struct *curr, int hardirq) 2072mark_held_locks(struct task_struct *curr, enum mark_type mark)
2131{ 2073{
2132 enum lock_usage_bit usage_bit; 2074 enum lock_usage_bit usage_bit;
2133 struct held_lock *hlock; 2075 struct held_lock *hlock;
@@ -2136,17 +2078,12 @@ mark_held_locks(struct task_struct *curr, int hardirq)
2136 for (i = 0; i < curr->lockdep_depth; i++) { 2078 for (i = 0; i < curr->lockdep_depth; i++) {
2137 hlock = curr->held_locks + i; 2079 hlock = curr->held_locks + i;
2138 2080
2139 if (hardirq) { 2081 usage_bit = 2 + (mark << 2); /* ENABLED */
2140 if (hlock->read) 2082 if (hlock->read)
2141 usage_bit = LOCK_ENABLED_HARDIRQS_READ; 2083 usage_bit += 1; /* READ */
2142 else 2084
2143 usage_bit = LOCK_ENABLED_HARDIRQS; 2085 BUG_ON(usage_bit >= LOCK_USAGE_STATES);
2144 } else { 2086
2145 if (hlock->read)
2146 usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
2147 else
2148 usage_bit = LOCK_ENABLED_SOFTIRQS;
2149 }
2150 if (!mark_lock(curr, hlock, usage_bit)) 2087 if (!mark_lock(curr, hlock, usage_bit))
2151 return 0; 2088 return 0;
2152 } 2089 }
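
mark_held_locks() now derives the usage bit arithmetically: usage_bit = 2 + (mark << 2) lands on LOCK_ENABLED_<state> (2 for HARDIRQ, 6 for SOFTIRQ, 10 for RECLAIM_FS), and the +1 selects the _READ variant, matching the four-values-per-state layout. The enum mark_type itself is another expansion of the LOCKDEP_STATE x-macro: lockdep_states.h holds the single list of states, and each consumer defines LOCKDEP_STATE, includes the header, then undefines it. A standalone demonstration of the trick, using a list macro instead of the kernel's #include purely for self-containment:

#include <stdio.h>

#define LOCKDEP_STATES \
	LOCKDEP_STATE(HARDIRQ) \
	LOCKDEP_STATE(SOFTIRQ) \
	LOCKDEP_STATE(RECLAIM_FS)

enum mark_type {			/* expansion 1: enumerators */
#define LOCKDEP_STATE(__STATE) __STATE,
	LOCKDEP_STATES
#undef LOCKDEP_STATE
};

static const char *state_names[] = {	/* expansion 2: name strings */
#define LOCKDEP_STATE(__STATE) #__STATE,
	LOCKDEP_STATES
#undef LOCKDEP_STATE
};

int main(void)
{
	/* expansion 3: one statement per state */
#define LOCKDEP_STATE(__STATE) \
	printf("%s: ENABLED bit = %d\n", state_names[__STATE], 2 + (__STATE << 2));
	LOCKDEP_STATES
#undef LOCKDEP_STATE
	return 0;
}

Adding a fourth lockdep state is then a one-line change to lockdep_states.h; every enum, string table and switch generated from it follows automatically.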
@@ -2200,7 +2137,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
2200 * We are going to turn hardirqs on, so set the 2137 * We are going to turn hardirqs on, so set the
2201 * usage bit for all held locks: 2138 * usage bit for all held locks:
2202 */ 2139 */
2203 if (!mark_held_locks(curr, 1)) 2140 if (!mark_held_locks(curr, HARDIRQ))
2204 return; 2141 return;
2205 /* 2142 /*
2206 * If we have softirqs enabled, then set the usage 2143 * If we have softirqs enabled, then set the usage
@@ -2208,7 +2145,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
2208 * this bit from being set before) 2145 * this bit from being set before)
2209 */ 2146 */
2210 if (curr->softirqs_enabled) 2147 if (curr->softirqs_enabled)
2211 if (!mark_held_locks(curr, 0)) 2148 if (!mark_held_locks(curr, SOFTIRQ))
2212 return; 2149 return;
2213 2150
2214 curr->hardirq_enable_ip = ip; 2151 curr->hardirq_enable_ip = ip;
@@ -2288,7 +2225,7 @@ void trace_softirqs_on(unsigned long ip)
2288 * enabled too: 2225 * enabled too:
2289 */ 2226 */
2290 if (curr->hardirqs_enabled) 2227 if (curr->hardirqs_enabled)
2291 mark_held_locks(curr, 0); 2228 mark_held_locks(curr, SOFTIRQ);
2292} 2229}
2293 2230
2294/* 2231/*
@@ -2317,6 +2254,48 @@ void trace_softirqs_off(unsigned long ip)
2317 debug_atomic_inc(&redundant_softirqs_off); 2254 debug_atomic_inc(&redundant_softirqs_off);
2318} 2255}
2319 2256
2257static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
2258{
2259 struct task_struct *curr = current;
2260
2261 if (unlikely(!debug_locks))
2262 return;
2263
2264 /* no reclaim without waiting on it */
2265 if (!(gfp_mask & __GFP_WAIT))
2266 return;
2267
2268 /* this guy won't enter reclaim */
2269 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2270 return;
2271
 2272 /* We're only interested in __GFP_FS allocations for now */
2273 if (!(gfp_mask & __GFP_FS))
2274 return;
2275
2276 if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
2277 return;
2278
2279 mark_held_locks(curr, RECLAIM_FS);
2280}
2281
2282static void check_flags(unsigned long flags);
2283
2284void lockdep_trace_alloc(gfp_t gfp_mask)
2285{
2286 unsigned long flags;
2287
2288 if (unlikely(current->lockdep_recursion))
2289 return;
2290
2291 raw_local_irq_save(flags);
2292 check_flags(flags);
2293 current->lockdep_recursion = 1;
2294 __lockdep_trace_alloc(gfp_mask, flags);
2295 current->lockdep_recursion = 0;
2296 raw_local_irq_restore(flags);
2297}
2298
2320static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) 2299static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2321{ 2300{
2322 /* 2301 /*
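
The guards in __lockdep_trace_alloc() narrow reclaim tracking to the interesting case: only allocations that may wait (__GFP_WAIT), not from a PF_MEMALLOC task that is exempt from reclaim (unless __GFP_NOMEMALLOC), and only __GFP_FS allocations, since GFP_FS recursion through the filesystem is what the new RECLAIM_FS state models. The lockdep_recursion dance in lockdep_trace_alloc() is the usual guard against the instrumentation re-entering itself; a user-space sketch of that guard pattern (names are illustrative):

#include <stdio.h>

static __thread int in_hook;		/* per-task flag, like lockdep_recursion */

static void trace_alloc_hook(const char *what)
{
	if (in_hook)
		return;			/* re-entered from our own work: bail out */
	in_hook = 1;
	printf("tracing: %s\n", what);	/* may itself allocate and re-enter */
	in_hook = 0;
}

int main(void)
{
	trace_alloc_hook("kmalloc(GFP_KERNEL)");
	return 0;
}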
@@ -2345,19 +2324,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2345 if (!hlock->hardirqs_off) { 2324 if (!hlock->hardirqs_off) {
2346 if (hlock->read) { 2325 if (hlock->read) {
2347 if (!mark_lock(curr, hlock, 2326 if (!mark_lock(curr, hlock,
2348 LOCK_ENABLED_HARDIRQS_READ)) 2327 LOCK_ENABLED_HARDIRQ_READ))
2349 return 0; 2328 return 0;
2350 if (curr->softirqs_enabled) 2329 if (curr->softirqs_enabled)
2351 if (!mark_lock(curr, hlock, 2330 if (!mark_lock(curr, hlock,
2352 LOCK_ENABLED_SOFTIRQS_READ)) 2331 LOCK_ENABLED_SOFTIRQ_READ))
2353 return 0; 2332 return 0;
2354 } else { 2333 } else {
2355 if (!mark_lock(curr, hlock, 2334 if (!mark_lock(curr, hlock,
2356 LOCK_ENABLED_HARDIRQS)) 2335 LOCK_ENABLED_HARDIRQ))
2357 return 0; 2336 return 0;
2358 if (curr->softirqs_enabled) 2337 if (curr->softirqs_enabled)
2359 if (!mark_lock(curr, hlock, 2338 if (!mark_lock(curr, hlock,
2360 LOCK_ENABLED_SOFTIRQS)) 2339 LOCK_ENABLED_SOFTIRQ))
2340 return 0;
2341 }
2342 }
2343
2344 /*
2345 * We reuse the irq context infrastructure more broadly as a general
2346 * context checking code. This tests GFP_FS recursion (a lock taken
2347 * during reclaim for a GFP_FS allocation is held over a GFP_FS
2348 * allocation).
2349 */
2350 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2351 if (hlock->read) {
2352 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2353 return 0;
2354 } else {
2355 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2361 return 0; 2356 return 0;
2362 } 2357 }
2363 } 2358 }
@@ -2412,6 +2407,10 @@ static inline int separate_irq_context(struct task_struct *curr,
2412 return 0; 2407 return 0;
2413} 2408}
2414 2409
2410void lockdep_trace_alloc(gfp_t gfp_mask)
2411{
2412}
2413
2415#endif 2414#endif
2416 2415
2417/* 2416/*
@@ -2445,14 +2444,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
2445 return 0; 2444 return 0;
2446 2445
2447 switch (new_bit) { 2446 switch (new_bit) {
2448 case LOCK_USED_IN_HARDIRQ: 2447#define LOCKDEP_STATE(__STATE) \
2449 case LOCK_USED_IN_SOFTIRQ: 2448 case LOCK_USED_IN_##__STATE: \
2450 case LOCK_USED_IN_HARDIRQ_READ: 2449 case LOCK_USED_IN_##__STATE##_READ: \
2451 case LOCK_USED_IN_SOFTIRQ_READ: 2450 case LOCK_ENABLED_##__STATE: \
2452 case LOCK_ENABLED_HARDIRQS: 2451 case LOCK_ENABLED_##__STATE##_READ:
2453 case LOCK_ENABLED_SOFTIRQS: 2452#include "lockdep_states.h"
2454 case LOCK_ENABLED_HARDIRQS_READ: 2453#undef LOCKDEP_STATE
2455 case LOCK_ENABLED_SOFTIRQS_READ:
2456 ret = mark_lock_irq(curr, this, new_bit); 2454 ret = mark_lock_irq(curr, this, new_bit);
2457 if (!ret) 2455 if (!ret)
2458 return 0; 2456 return 0;
@@ -2966,6 +2964,16 @@ void lock_release(struct lockdep_map *lock, int nested,
2966} 2964}
2967EXPORT_SYMBOL_GPL(lock_release); 2965EXPORT_SYMBOL_GPL(lock_release);
2968 2966
2967void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
2968{
2969 current->lockdep_reclaim_gfp = gfp_mask;
2970}
2971
2972void lockdep_clear_current_reclaim_state(void)
2973{
2974 current->lockdep_reclaim_gfp = 0;
2975}
2976
2969#ifdef CONFIG_LOCK_STAT 2977#ifdef CONFIG_LOCK_STAT
2970static int 2978static int
2971print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, 2979print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b196932c08..a2cc7e9a6e84 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -7,6 +7,45 @@
7 */ 7 */
8 8
9/* 9/*
10 * Lock-class usage-state bits:
11 */
12enum lock_usage_bit {
13#define LOCKDEP_STATE(__STATE) \
14 LOCK_USED_IN_##__STATE, \
15 LOCK_USED_IN_##__STATE##_READ, \
16 LOCK_ENABLED_##__STATE, \
17 LOCK_ENABLED_##__STATE##_READ,
18#include "lockdep_states.h"
19#undef LOCKDEP_STATE
20 LOCK_USED,
21 LOCK_USAGE_STATES
22};
23
24/*
25 * Usage-state bitmasks:
26 */
27#define __LOCKF(__STATE) LOCKF_##__STATE = (1 << LOCK_##__STATE),
28
29enum {
30#define LOCKDEP_STATE(__STATE) \
31 __LOCKF(USED_IN_##__STATE) \
32 __LOCKF(USED_IN_##__STATE##_READ) \
33 __LOCKF(ENABLED_##__STATE) \
34 __LOCKF(ENABLED_##__STATE##_READ)
35#include "lockdep_states.h"
36#undef LOCKDEP_STATE
37 __LOCKF(USED)
38};
39
40#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
41#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
42
43#define LOCKF_ENABLED_IRQ_READ \
44 (LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
45#define LOCKF_USED_IN_IRQ_READ \
46 (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
47
48/*
10 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies 49 * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
11 * we track. 50 * we track.
12 * 51 *
@@ -31,8 +70,10 @@
31extern struct list_head all_lock_classes; 70extern struct list_head all_lock_classes;
32extern struct lock_chain lock_chains[]; 71extern struct lock_chain lock_chains[];
33 72
34extern void 73#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
35get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4); 74
75extern void get_usage_chars(struct lock_class *class,
76 char usage[LOCK_USAGE_CHARS]);
36 77
37extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str); 78extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
38 79
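
With the three states in kernel/lockdep_states.h, the generated enum lock_usage_bit works out to the following (a sketch of the preprocessed result, values added for orientation):

        enum lock_usage_bit {
                LOCK_USED_IN_HARDIRQ,           /*  0 */
                LOCK_USED_IN_HARDIRQ_READ,      /*  1 */
                LOCK_ENABLED_HARDIRQ,           /*  2 */
                LOCK_ENABLED_HARDIRQ_READ,      /*  3 */
                LOCK_USED_IN_SOFTIRQ,           /*  4 */
                LOCK_USED_IN_SOFTIRQ_READ,      /*  5 */
                LOCK_ENABLED_SOFTIRQ,           /*  6 */
                LOCK_ENABLED_SOFTIRQ_READ,      /*  7 */
                LOCK_USED_IN_RECLAIM_FS,        /*  8 */
                LOCK_USED_IN_RECLAIM_FS_READ,   /*  9 */
                LOCK_ENABLED_RECLAIM_FS,        /* 10 */
                LOCK_ENABLED_RECLAIM_FS_READ,   /* 11 */
                LOCK_USED,                      /* 12 */
                LOCK_USAGE_STATES               /* 13 */
        };

Hence LOCK_USAGE_CHARS = 1 + 13/2 = 7 with integer division: two characters per state (write and read usage) for three states, plus the terminating NUL, which is why l_show() below can print the buffer with a plain %s.
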
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b813896..d7135aa2d2c4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
84{ 84{
85 struct lock_class *class = v; 85 struct lock_class *class = v;
86 struct lock_list *entry; 86 struct lock_list *entry;
87 char c1, c2, c3, c4; 87 char usage[LOCK_USAGE_CHARS];
88 88
89 if (v == SEQ_START_TOKEN) { 89 if (v == SEQ_START_TOKEN) {
90 seq_printf(m, "all lock classes:\n"); 90 seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
100 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class)); 100 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
101#endif 101#endif
102 102
103 get_usage_chars(class, &c1, &c2, &c3, &c4); 103 get_usage_chars(class, usage);
104 seq_printf(m, " %c%c%c%c", c1, c2, c3, c4); 104 seq_printf(m, " %s", usage);
105 105
106 seq_printf(m, ": "); 106 seq_printf(m, ": ");
107 print_name(m, class); 107 print_name(m, class);
@@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
300 nr_uncategorized++; 300 nr_uncategorized++;
301 if (class->usage_mask & LOCKF_USED_IN_IRQ) 301 if (class->usage_mask & LOCKF_USED_IN_IRQ)
302 nr_irq_safe++; 302 nr_irq_safe++;
303 if (class->usage_mask & LOCKF_ENABLED_IRQS) 303 if (class->usage_mask & LOCKF_ENABLED_IRQ)
304 nr_irq_unsafe++; 304 nr_irq_unsafe++;
305 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ) 305 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
306 nr_softirq_safe++; 306 nr_softirq_safe++;
307 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS) 307 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
308 nr_softirq_unsafe++; 308 nr_softirq_unsafe++;
309 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ) 309 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
310 nr_hardirq_safe++; 310 nr_hardirq_safe++;
311 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS) 311 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
312 nr_hardirq_unsafe++; 312 nr_hardirq_unsafe++;
313 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ) 313 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
314 nr_irq_read_safe++; 314 nr_irq_read_safe++;
315 if (class->usage_mask & LOCKF_ENABLED_IRQS_READ) 315 if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
316 nr_irq_read_unsafe++; 316 nr_irq_read_unsafe++;
317 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) 317 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
318 nr_softirq_read_safe++; 318 nr_softirq_read_safe++;
319 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ) 319 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
320 nr_softirq_read_unsafe++; 320 nr_softirq_read_unsafe++;
321 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) 321 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
322 nr_hardirq_read_safe++; 322 nr_hardirq_read_safe++;
323 if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ) 323 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
324 nr_hardirq_read_unsafe++; 324 nr_hardirq_read_unsafe++;
325 325
326#ifdef CONFIG_PROVE_LOCKING 326#ifdef CONFIG_PROVE_LOCKING
@@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
601static void seq_header(struct seq_file *m) 601static void seq_header(struct seq_file *m)
602{ 602{
603 seq_printf(m, "lock_stat version 0.3\n"); 603 seq_printf(m, "lock_stat version 0.3\n");
604
605 if (unlikely(!debug_locks))
606 seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
607
604 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1)); 608 seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
605 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s " 609 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
606 "%14s %14s\n", 610 "%14s %14s\n",
diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h
new file mode 100644
index 000000000000..995b0cc2b84c
--- /dev/null
+++ b/kernel/lockdep_states.h
@@ -0,0 +1,9 @@
1/*
2 * Lockdep states,
3 *
4 * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
5 * you add one, or come up with a nice dynamic solution.
6 */
7LOCKDEP_STATE(HARDIRQ)
8LOCKDEP_STATE(SOFTIRQ)
9LOCKDEP_STATE(RECLAIM_FS)
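
This is the classic "X-macro" pattern: each includer defines LOCKDEP_STATE() to emit whatever it needs per state, includes the header, then undefines the macro. A self-contained userspace illustration of the same technique (STATE_LIST and the demo are invented for illustration; the kernel uses a standalone header instead of a list macro so new states are added in exactly one file):

        #include <stdio.h>

        /* Stands in for #include "lockdep_states.h". */
        #define STATE_LIST \
                STATE(HARDIRQ) \
                STATE(SOFTIRQ) \
                STATE(RECLAIM_FS)

        enum state {
        #define STATE(s) S_##s,
                STATE_LIST
        #undef STATE
                S_MAX
        };

        static const char *state_name(enum state s)
        {
                switch (s) {
        #define STATE(st) case S_##st: return #st;
                STATE_LIST
        #undef STATE
                default:
                        return "?";
                }
        }

        int main(void)
        {
                for (int i = 0; i < S_MAX; i++)
                        printf("%d = %s\n", i, state_name((enum state)i));
                return 0;
        }
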
diff --git a/kernel/module.c b/kernel/module.c
index 1196f5d11700..f77ac320d0b5 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -51,6 +51,7 @@
51#include <linux/tracepoint.h> 51#include <linux/tracepoint.h>
52#include <linux/ftrace.h> 52#include <linux/ftrace.h>
53#include <linux/async.h> 53#include <linux/async.h>
54#include <linux/percpu.h>
54 55
55#if 0 56#if 0
56#define DEBUGP printk 57#define DEBUGP printk
@@ -366,6 +367,34 @@ static struct module *find_module(const char *name)
366} 367}
367 368
368#ifdef CONFIG_SMP 369#ifdef CONFIG_SMP
370
371#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
372
373static void *percpu_modalloc(unsigned long size, unsigned long align,
374 const char *name)
375{
376 void *ptr;
377
378 if (align > PAGE_SIZE) {
379 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
380 name, align, PAGE_SIZE);
381 align = PAGE_SIZE;
382 }
383
384 ptr = __alloc_reserved_percpu(size, align);
385 if (!ptr)
386 printk(KERN_WARNING
387 "Could not allocate %lu bytes percpu data\n", size);
388 return ptr;
389}
390
391static void percpu_modfree(void *freeme)
392{
393 free_percpu(freeme);
394}
395
396#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
397
369/* Number of blocks used and allocated. */ 398/* Number of blocks used and allocated. */
370static unsigned int pcpu_num_used, pcpu_num_allocated; 399static unsigned int pcpu_num_used, pcpu_num_allocated;
371/* Size of each block. -ve means used. */ 400/* Size of each block. -ve means used. */
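
With CONFIG_HAVE_DYNAMIC_PER_CPU_AREA, the module loader stops running its own block allocator (kept below as the fallback) and defers to the core percpu allocator's reserved region. A sketch of the resulting call pattern, with error handling abbreviated:

        /* Alignment is clamped to PAGE_SIZE first; the reserved percpu
         * chunk cannot honour anything larger. */
        void *ptr = __alloc_reserved_percpu(size, align);

        if (!ptr)
                return NULL;    /* load_module() treats NULL as an error */
        /* ... the loader then fills it via percpu_modcopy() ... */
        free_percpu(ptr);       /* percpu_modfree() is exactly this */
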
@@ -480,21 +509,6 @@ static void percpu_modfree(void *freeme)
480 } 509 }
481} 510}
482 511
483static unsigned int find_pcpusec(Elf_Ehdr *hdr,
484 Elf_Shdr *sechdrs,
485 const char *secstrings)
486{
487 return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
488}
489
490static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
491{
492 int cpu;
493
494 for_each_possible_cpu(cpu)
495 memcpy(pcpudest + per_cpu_offset(cpu), from, size);
496}
497
498static int percpu_modinit(void) 512static int percpu_modinit(void)
499{ 513{
500 pcpu_num_used = 2; 514 pcpu_num_used = 2;
@@ -513,7 +527,26 @@ static int percpu_modinit(void)
513 return 0; 527 return 0;
514} 528}
515__initcall(percpu_modinit); 529__initcall(percpu_modinit);
530
531#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
532
533static unsigned int find_pcpusec(Elf_Ehdr *hdr,
534 Elf_Shdr *sechdrs,
535 const char *secstrings)
536{
537 return find_sec(hdr, sechdrs, secstrings, ".data.percpu");
538}
539
540static void percpu_modcopy(void *pcpudest, const void *from, unsigned long size)
541{
542 int cpu;
543
544 for_each_possible_cpu(cpu)
545 memcpy(pcpudest + per_cpu_offset(cpu), from, size);
546}
547
516#else /* ... !CONFIG_SMP */ 548#else /* ... !CONFIG_SMP */
549
517static inline void *percpu_modalloc(unsigned long size, unsigned long align, 550static inline void *percpu_modalloc(unsigned long size, unsigned long align,
518 const char *name) 551 const char *name)
519{ 552{
@@ -535,6 +568,7 @@ static inline void percpu_modcopy(void *pcpudst, const void *src,
535 /* pcpusec should be 0, and size of that section should be 0. */ 568 /* pcpusec should be 0, and size of that section should be 0. */
536 BUG_ON(size != 0); 569 BUG_ON(size != 0);
537} 570}
571
538#endif /* CONFIG_SMP */ 572#endif /* CONFIG_SMP */
539 573
540#define MODINFO_ATTR(field) \ 574#define MODINFO_ATTR(field) \
@@ -822,7 +856,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
822 mutex_lock(&module_mutex); 856 mutex_lock(&module_mutex);
823 /* Store the name of the last unloaded module for diagnostic purposes */ 857 /* Store the name of the last unloaded module for diagnostic purposes */
824 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); 858 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
825 unregister_dynamic_debug_module(mod->name); 859 ddebug_remove_module(mod->name);
826 free_module(mod); 860 free_module(mod);
827 861
828 out: 862 out:
@@ -1827,19 +1861,13 @@ static inline void add_kallsyms(struct module *mod,
1827} 1861}
1828#endif /* CONFIG_KALLSYMS */ 1862#endif /* CONFIG_KALLSYMS */
1829 1863
1830static void dynamic_printk_setup(struct mod_debug *debug, unsigned int num) 1864static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
1831{ 1865{
1832#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG 1866#ifdef CONFIG_DYNAMIC_DEBUG
1833 unsigned int i; 1867 if (ddebug_add_module(debug, num, debug->modname))
1834 1868 printk(KERN_ERR "dynamic debug error adding module: %s\n",
1835 for (i = 0; i < num; i++) { 1869 debug->modname);
1836 register_dynamic_debug_module(debug[i].modname, 1870#endif
1837 debug[i].type,
1838 debug[i].logical_modname,
1839 debug[i].flag_names,
1840 debug[i].hash, debug[i].hash2);
1841 }
1842#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
1843} 1871}
1844 1872
1845static void *module_alloc_update_bounds(unsigned long size) 1873static void *module_alloc_update_bounds(unsigned long size)
@@ -2213,12 +2241,13 @@ static noinline struct module *load_module(void __user *umod,
2213 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); 2241 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
2214 2242
2215 if (!mod->taints) { 2243 if (!mod->taints) {
2216 struct mod_debug *debug; 2244 struct _ddebug *debug;
2217 unsigned int num_debug; 2245 unsigned int num_debug;
2218 2246
2219 debug = section_objs(hdr, sechdrs, secstrings, "__verbose", 2247 debug = section_objs(hdr, sechdrs, secstrings, "__verbose",
2220 sizeof(*debug), &num_debug); 2248 sizeof(*debug), &num_debug);
2221 dynamic_printk_setup(debug, num_debug); 2249 if (debug)
2250 dynamic_debug_setup(debug, num_debug);
2222 } 2251 }
2223 2252
2224 /* sechdrs[0].sh_size is always zero */ 2253 /* sechdrs[0].sh_size is always zero */
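
Under CONFIG_DYNAMIC_DEBUG, each pr_debug()-style call site plants a struct _ddebug descriptor in the module's "__verbose" section, and the loader now registers the whole table with a single ddebug_add_module() call. A simplified sketch of what one call site reduces to inside the calling function (field and flag names follow include/linux/dynamic_debug.h of this era; treat the exact layout as an assumption):

        static struct _ddebug descriptor
                __attribute__((__section__("__verbose"))) = {
                .modname  = KBUILD_MODNAME,     /* assumption: fields      */
                .function = __func__,           /* simplified from the     */
                .filename = __FILE__,           /* real descriptor layout  */
                .lineno   = __LINE__,
                .flags    = 0,                  /* off until enabled at runtime */
        };

        if (descriptor.flags & _DPRINTK_FLAGS_PRINT)
                printk(KERN_DEBUG "...");
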
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1d94160eb532..50d022e5a560 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -26,11 +26,6 @@
26/* 26/*
27 * Must be called with lock->wait_lock held. 27 * Must be called with lock->wait_lock held.
28 */ 28 */
29void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
30{
31 lock->owner = new_owner;
32}
33
34void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) 29void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
35{ 30{
36 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); 31 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
59 54
60 /* Mark the current thread as blocked on the lock: */ 55 /* Mark the current thread as blocked on the lock: */
61 ti->task->blocked_on = waiter; 56 ti->task->blocked_on = waiter;
62 waiter->lock = lock;
63} 57}
64 58
65void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, 59void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
82 DEBUG_LOCKS_WARN_ON(lock->magic != lock); 76 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
83 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); 77 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
84 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); 78 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
85 DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info()); 79 mutex_clear_owner(lock);
86} 80}
87 81
88void debug_mutex_init(struct mutex *lock, const char *name, 82void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
95 debug_check_no_locks_freed((void *)lock, sizeof(*lock)); 89 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
96 lockdep_init_map(&lock->dep_map, name, key, 0); 90 lockdep_init_map(&lock->dep_map, name, key, 0);
97#endif 91#endif
98 lock->owner = NULL;
99 lock->magic = lock; 92 lock->magic = lock;
100} 93}
101 94
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index babfbdfc534b..6b2d735846a5 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -13,14 +13,6 @@
13/* 13/*
14 * This must be called with lock->wait_lock held. 14 * This must be called with lock->wait_lock held.
15 */ 15 */
16extern void
17debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
18
19static inline void debug_mutex_clear_owner(struct mutex *lock)
20{
21 lock->owner = NULL;
22}
23
24extern void debug_mutex_lock_common(struct mutex *lock, 16extern void debug_mutex_lock_common(struct mutex *lock,
25 struct mutex_waiter *waiter); 17 struct mutex_waiter *waiter);
26extern void debug_mutex_wake_waiter(struct mutex *lock, 18extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
35extern void debug_mutex_init(struct mutex *lock, const char *name, 27extern void debug_mutex_init(struct mutex *lock, const char *name,
36 struct lock_class_key *key); 28 struct lock_class_key *key);
37 29
30static inline void mutex_set_owner(struct mutex *lock)
31{
32 lock->owner = current_thread_info();
33}
34
35static inline void mutex_clear_owner(struct mutex *lock)
36{
37 lock->owner = NULL;
38}
39
38#define spin_lock_mutex(lock, flags) \ 40#define spin_lock_mutex(lock, flags) \
39 do { \ 41 do { \
40 struct mutex *l = container_of(lock, struct mutex, wait_lock); \ 42 struct mutex *l = container_of(lock, struct mutex, wait_lock); \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b658ef..5d79781394a3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and 10 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
11 * David Howells for suggestions and improvements. 11 * David Howells for suggestions and improvements.
12 * 12 *
13 * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
14 * from the -rt tree, where it was originally implemented for rtmutexes
15 * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
16 * and Sven Dietrich.
17 *
13 * Also see Documentation/mutex-design.txt. 18 * Also see Documentation/mutex-design.txt.
14 */ 19 */
15#include <linux/mutex.h> 20#include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
46 atomic_set(&lock->count, 1); 51 atomic_set(&lock->count, 1);
47 spin_lock_init(&lock->wait_lock); 52 spin_lock_init(&lock->wait_lock);
48 INIT_LIST_HEAD(&lock->wait_list); 53 INIT_LIST_HEAD(&lock->wait_list);
54 mutex_clear_owner(lock);
49 55
50 debug_mutex_init(lock, name, key); 56 debug_mutex_init(lock, name, key);
51} 57}
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
91 * 'unlocked' into 'locked' state. 97 * 'unlocked' into 'locked' state.
92 */ 98 */
93 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath); 99 __mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
100 mutex_set_owner(lock);
94} 101}
95 102
96EXPORT_SYMBOL(mutex_lock); 103EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
115 * The unlocking fastpath is the 0->1 transition from 'locked' 122 * The unlocking fastpath is the 0->1 transition from 'locked'
116 * into 'unlocked' state: 123 * into 'unlocked' state:
117 */ 124 */
125#ifndef CONFIG_DEBUG_MUTEXES
126 /*
127 * When debugging is enabled we must not clear the owner before time,
128 * the slow path will always be taken, and that clears the owner field
129 * after verifying that it was indeed current.
130 */
131 mutex_clear_owner(lock);
132#endif
118 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath); 133 __mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
119} 134}
120 135
@@ -129,21 +144,75 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
129{ 144{
130 struct task_struct *task = current; 145 struct task_struct *task = current;
131 struct mutex_waiter waiter; 146 struct mutex_waiter waiter;
132 unsigned int old_val;
133 unsigned long flags; 147 unsigned long flags;
134 148
149 preempt_disable();
150 mutex_acquire(&lock->dep_map, subclass, 0, ip);
151#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
152 /*
153 * Optimistic spinning.
154 *
155 * We try to spin for acquisition when we find that there are no
156 * pending waiters and the lock owner is currently running on a
157 * (different) CPU.
158 *
159 * The rationale is that if the lock owner is running, it is likely to
160 * release the lock soon.
161 *
162 * Since this needs the lock owner, and this mutex implementation
163 * doesn't track the owner atomically in the lock field, we need to
164 * track it non-atomically.
165 *
166 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
167 * to serialize everything.
168 */
169
170 for (;;) {
171 struct thread_info *owner;
172
173 /*
174 * If there's an owner, wait for it to either
175 * release the lock or go to sleep.
176 */
177 owner = ACCESS_ONCE(lock->owner);
178 if (owner && !mutex_spin_on_owner(lock, owner))
179 break;
180
181 if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
182 lock_acquired(&lock->dep_map, ip);
183 mutex_set_owner(lock);
184 preempt_enable();
185 return 0;
186 }
187
188 /*
189 * When there's no owner, we might have preempted between the
190 * owner acquiring the lock and setting the owner field. If
 191 * we're an RT task, that will live-lock because we won't let
192 * the owner complete.
193 */
194 if (!owner && (need_resched() || rt_task(task)))
195 break;
196
197 /*
198 * The cpu_relax() call is a compiler barrier which forces
199 * everything in this loop to be re-loaded. We don't need
200 * memory barriers as we'll eventually observe the right
201 * values at the cost of a few extra spins.
202 */
203 cpu_relax();
204 }
205#endif
135 spin_lock_mutex(&lock->wait_lock, flags); 206 spin_lock_mutex(&lock->wait_lock, flags);
136 207
137 debug_mutex_lock_common(lock, &waiter); 208 debug_mutex_lock_common(lock, &waiter);
138 mutex_acquire(&lock->dep_map, subclass, 0, ip);
139 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task)); 209 debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
140 210
141 /* add waiting tasks to the end of the waitqueue (FIFO): */ 211 /* add waiting tasks to the end of the waitqueue (FIFO): */
142 list_add_tail(&waiter.list, &lock->wait_list); 212 list_add_tail(&waiter.list, &lock->wait_list);
143 waiter.task = task; 213 waiter.task = task;
144 214
145 old_val = atomic_xchg(&lock->count, -1); 215 if (atomic_xchg(&lock->count, -1) == 1)
146 if (old_val == 1)
147 goto done; 216 goto done;
148 217
149 lock_contended(&lock->dep_map, ip); 218 lock_contended(&lock->dep_map, ip);
@@ -158,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
158 * that when we release the lock, we properly wake up the 227 * that when we release the lock, we properly wake up the
159 * other waiters: 228 * other waiters:
160 */ 229 */
161 old_val = atomic_xchg(&lock->count, -1); 230 if (atomic_xchg(&lock->count, -1) == 1)
162 if (old_val == 1)
163 break; 231 break;
164 232
165 /* 233 /*
@@ -173,21 +241,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
173 spin_unlock_mutex(&lock->wait_lock, flags); 241 spin_unlock_mutex(&lock->wait_lock, flags);
174 242
175 debug_mutex_free_waiter(&waiter); 243 debug_mutex_free_waiter(&waiter);
244 preempt_enable();
176 return -EINTR; 245 return -EINTR;
177 } 246 }
178 __set_task_state(task, state); 247 __set_task_state(task, state);
179 248
 180 /* didn't get the lock, go to sleep: */ 249
181 spin_unlock_mutex(&lock->wait_lock, flags); 250 spin_unlock_mutex(&lock->wait_lock, flags);
182 schedule(); 251 __schedule();
183 spin_lock_mutex(&lock->wait_lock, flags); 252 spin_lock_mutex(&lock->wait_lock, flags);
184 } 253 }
185 254
186done: 255done:
187 lock_acquired(&lock->dep_map, ip); 256 lock_acquired(&lock->dep_map, ip);
188 /* got the lock - rejoice! */ 257 /* got the lock - rejoice! */
189 mutex_remove_waiter(lock, &waiter, task_thread_info(task)); 258 mutex_remove_waiter(lock, &waiter, current_thread_info());
190 debug_mutex_set_owner(lock, task_thread_info(task)); 259 mutex_set_owner(lock);
191 260
192 /* set it to 0 if there are no waiters left: */ 261 /* set it to 0 if there are no waiters left: */
193 if (likely(list_empty(&lock->wait_list))) 262 if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +265,7 @@ done:
196 spin_unlock_mutex(&lock->wait_lock, flags); 265 spin_unlock_mutex(&lock->wait_lock, flags);
197 266
198 debug_mutex_free_waiter(&waiter); 267 debug_mutex_free_waiter(&waiter);
268 preempt_enable();
199 269
200 return 0; 270 return 0;
201} 271}
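
For readers who want the spinning logic in isolation: a minimal, self-contained userspace rendering of the spin-then-block idea. This is toy code, not the kernel API; owner_is_running() stands in for mutex_spin_on_owner() and always says yes here, the blocking slow path is stubbed out, and the RT/need_resched() bail-out from the patch is omitted:

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct toy_mutex {
                atomic_int count;               /* 1 unlocked, 0 locked */
                _Atomic(void *) owner;          /* opaque owner cookie */
        };

        static bool owner_is_running(void *owner)
        {
                (void)owner;
                return true;    /* kernel: checks the owner is still on a CPU */
        }

        static void toy_mutex_sleep(struct toy_mutex *m)
        {
                (void)m;        /* stub for the queue-and-sleep slow path */
        }

        static void toy_mutex_lock(struct toy_mutex *m, void *self)
        {
                for (;;) {
                        void *owner = atomic_load(&m->owner);
                        int expected = 1;

                        /* Owner went off-CPU: stop spinning, block instead. */
                        if (owner && !owner_is_running(owner))
                                break;

                        /* Try the 1 -> 0 transition; success means ownership. */
                        if (atomic_compare_exchange_strong(&m->count,
                                                           &expected, 0)) {
                                atomic_store(&m->owner, self);
                                return;
                        }
                        /* cpu_relax() would sit here in the kernel version. */
                }
                toy_mutex_sleep(m);
        }

        int main(void)
        {
                struct toy_mutex m = { 1, NULL };
                int me;

                toy_mutex_lock(&m, &me);
                printf("locked, count = %d\n", atomic_load(&m.count));
                return 0;
        }
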
@@ -222,7 +292,8 @@ int __sched
222mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) 292mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
223{ 293{
224 might_sleep(); 294 might_sleep();
225 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_); 295 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
296 subclass, _RET_IP_);
226} 297}
227 298
228EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); 299EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +331,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
260 wake_up_process(waiter->task); 331 wake_up_process(waiter->task);
261 } 332 }
262 333
263 debug_mutex_clear_owner(lock);
264
265 spin_unlock_mutex(&lock->wait_lock, flags); 334 spin_unlock_mutex(&lock->wait_lock, flags);
266} 335}
267 336
@@ -298,18 +367,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
298 */ 367 */
299int __sched mutex_lock_interruptible(struct mutex *lock) 368int __sched mutex_lock_interruptible(struct mutex *lock)
300{ 369{
370 int ret;
371
301 might_sleep(); 372 might_sleep();
302 return __mutex_fastpath_lock_retval 373 ret = __mutex_fastpath_lock_retval
303 (&lock->count, __mutex_lock_interruptible_slowpath); 374 (&lock->count, __mutex_lock_interruptible_slowpath);
375 if (!ret)
376 mutex_set_owner(lock);
377
378 return ret;
304} 379}
305 380
306EXPORT_SYMBOL(mutex_lock_interruptible); 381EXPORT_SYMBOL(mutex_lock_interruptible);
307 382
308int __sched mutex_lock_killable(struct mutex *lock) 383int __sched mutex_lock_killable(struct mutex *lock)
309{ 384{
385 int ret;
386
310 might_sleep(); 387 might_sleep();
311 return __mutex_fastpath_lock_retval 388 ret = __mutex_fastpath_lock_retval
312 (&lock->count, __mutex_lock_killable_slowpath); 389 (&lock->count, __mutex_lock_killable_slowpath);
390 if (!ret)
391 mutex_set_owner(lock);
392
393 return ret;
313} 394}
314EXPORT_SYMBOL(mutex_lock_killable); 395EXPORT_SYMBOL(mutex_lock_killable);
315 396
@@ -352,9 +433,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
352 433
353 prev = atomic_xchg(&lock->count, -1); 434 prev = atomic_xchg(&lock->count, -1);
354 if (likely(prev == 1)) { 435 if (likely(prev == 1)) {
355 debug_mutex_set_owner(lock, current_thread_info()); 436 mutex_set_owner(lock);
356 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); 437 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
357 } 438 }
439
358 /* Set it back to 0 if there are no waiters: */ 440 /* Set it back to 0 if there are no waiters: */
359 if (likely(list_empty(&lock->wait_list))) 441 if (likely(list_empty(&lock->wait_list)))
360 atomic_set(&lock->count, 0); 442 atomic_set(&lock->count, 0);
@@ -380,8 +462,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
380 */ 462 */
381int __sched mutex_trylock(struct mutex *lock) 463int __sched mutex_trylock(struct mutex *lock)
382{ 464{
383 return __mutex_fastpath_trylock(&lock->count, 465 int ret;
384 __mutex_trylock_slowpath); 466
467 ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
468 if (ret)
469 mutex_set_owner(lock);
470
471 return ret;
385} 472}
386 473
387EXPORT_SYMBOL(mutex_trylock); 474EXPORT_SYMBOL(mutex_trylock);
diff --git a/kernel/mutex.h b/kernel/mutex.h
index a075dafbb290..67578ca48f94 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -16,8 +16,26 @@
16#define mutex_remove_waiter(lock, waiter, ti) \ 16#define mutex_remove_waiter(lock, waiter, ti) \
17 __list_del((waiter)->list.prev, (waiter)->list.next) 17 __list_del((waiter)->list.prev, (waiter)->list.next)
18 18
19#define debug_mutex_set_owner(lock, new_owner) do { } while (0) 19#ifdef CONFIG_SMP
20#define debug_mutex_clear_owner(lock) do { } while (0) 20static inline void mutex_set_owner(struct mutex *lock)
21{
22 lock->owner = current_thread_info();
23}
24
25static inline void mutex_clear_owner(struct mutex *lock)
26{
27 lock->owner = NULL;
28}
29#else
30static inline void mutex_set_owner(struct mutex *lock)
31{
32}
33
34static inline void mutex_clear_owner(struct mutex *lock)
35{
36}
37#endif
38
21#define debug_mutex_wake_waiter(lock, waiter) do { } while (0) 39#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
22#define debug_mutex_free_waiter(waiter) do { } while (0) 40#define debug_mutex_free_waiter(waiter) do { } while (0)
23#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0) 41#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c
index 78bc3fdac0d2..5aa854f9e5ae 100644
--- a/kernel/ns_cgroup.c
+++ b/kernel/ns_cgroup.c
@@ -34,7 +34,7 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
34 34
35/* 35/*
36 * Rules: 36 * Rules:
37 * 1. you can only enter a cgroup which is a child of your current 37 * 1. you can only enter a cgroup which is a descendant of your current
38 * cgroup 38 * cgroup
39 * 2. you can only place another process into a cgroup if 39 * 2. you can only place another process into a cgroup if
40 * a. you have CAP_SYS_ADMIN 40 * a. you have CAP_SYS_ADMIN
@@ -45,21 +45,15 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid)
45static int ns_can_attach(struct cgroup_subsys *ss, 45static int ns_can_attach(struct cgroup_subsys *ss,
46 struct cgroup *new_cgroup, struct task_struct *task) 46 struct cgroup *new_cgroup, struct task_struct *task)
47{ 47{
48 struct cgroup *orig;
49
50 if (current != task) { 48 if (current != task) {
51 if (!capable(CAP_SYS_ADMIN)) 49 if (!capable(CAP_SYS_ADMIN))
52 return -EPERM; 50 return -EPERM;
53 51
54 if (!cgroup_is_descendant(new_cgroup)) 52 if (!cgroup_is_descendant(new_cgroup, current))
55 return -EPERM; 53 return -EPERM;
56 } 54 }
57 55
58 if (atomic_read(&new_cgroup->count) != 0) 56 if (!cgroup_is_descendant(new_cgroup, task))
59 return -EPERM;
60
61 orig = task_cgroup(task, ns_subsys_id);
62 if (orig && orig != new_cgroup->parent)
63 return -EPERM; 57 return -EPERM;
64 58
65 return 0; 59 return 0;
@@ -77,7 +71,7 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss,
77 71
78 if (!capable(CAP_SYS_ADMIN)) 72 if (!capable(CAP_SYS_ADMIN))
79 return ERR_PTR(-EPERM); 73 return ERR_PTR(-EPERM);
80 if (!cgroup_is_descendant(cgroup)) 74 if (!cgroup_is_descendant(cgroup, current))
81 return ERR_PTR(-EPERM); 75 return ERR_PTR(-EPERM);
82 76
83 ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL); 77 ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL);
diff --git a/kernel/panic.c b/kernel/panic.c
index 2a2ff36ff44d..3fd8c5bf8b39 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -8,19 +8,19 @@
 8 * This function is used throughout the kernel (including mm and fs) 8
9 * to indicate a major problem. 9 * to indicate a major problem.
10 */ 10 */
11#include <linux/debug_locks.h>
12#include <linux/interrupt.h>
13#include <linux/kallsyms.h>
14#include <linux/notifier.h>
11#include <linux/module.h> 15#include <linux/module.h>
12#include <linux/sched.h> 16#include <linux/random.h>
13#include <linux/delay.h>
14#include <linux/reboot.h> 17#include <linux/reboot.h>
15#include <linux/notifier.h> 18#include <linux/delay.h>
16#include <linux/init.h> 19#include <linux/kexec.h>
20#include <linux/sched.h>
17#include <linux/sysrq.h> 21#include <linux/sysrq.h>
18#include <linux/interrupt.h> 22#include <linux/init.h>
19#include <linux/nmi.h> 23#include <linux/nmi.h>
20#include <linux/kexec.h>
21#include <linux/debug_locks.h>
22#include <linux/random.h>
23#include <linux/kallsyms.h>
24#include <linux/dmi.h> 24#include <linux/dmi.h>
25 25
26int panic_on_oops; 26int panic_on_oops;
@@ -52,19 +52,15 @@ EXPORT_SYMBOL(panic_blink);
52 * 52 *
53 * This function never returns. 53 * This function never returns.
54 */ 54 */
55
56NORET_TYPE void panic(const char * fmt, ...) 55NORET_TYPE void panic(const char * fmt, ...)
57{ 56{
58 long i;
59 static char buf[1024]; 57 static char buf[1024];
60 va_list args; 58 va_list args;
61#if defined(CONFIG_S390) 59 long i;
62 unsigned long caller = (unsigned long) __builtin_return_address(0);
63#endif
64 60
65 /* 61 /*
66 * It's possible to come here directly from a panic-assertion and not 62 * It's possible to come here directly from a panic-assertion and
67 * have preempt disabled. Some functions called from here want 63 * not have preempt disabled. Some functions called from here want
68 * preempt to be disabled. No point enabling it later though... 64 * preempt to be disabled. No point enabling it later though...
69 */ 65 */
70 preempt_disable(); 66 preempt_disable();
@@ -74,7 +70,9 @@ NORET_TYPE void panic(const char * fmt, ...)
74 vsnprintf(buf, sizeof(buf), fmt, args); 70 vsnprintf(buf, sizeof(buf), fmt, args);
75 va_end(args); 71 va_end(args);
76 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf); 72 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
77 bust_spinlocks(0); 73#ifdef CONFIG_DEBUG_BUGVERBOSE
74 dump_stack();
75#endif
78 76
79 /* 77 /*
80 * If we have crashed and we have a crash kernel loaded let it handle 78 * If we have crashed and we have a crash kernel loaded let it handle
@@ -83,14 +81,12 @@ NORET_TYPE void panic(const char * fmt, ...)
83 */ 81 */
84 crash_kexec(NULL); 82 crash_kexec(NULL);
85 83
86#ifdef CONFIG_SMP
87 /* 84 /*
88 * Note smp_send_stop is the usual smp shutdown function, which 85 * Note smp_send_stop is the usual smp shutdown function, which
89 * unfortunately means it may not be hardened to work in a panic 86 * unfortunately means it may not be hardened to work in a panic
90 * situation. 87 * situation.
91 */ 88 */
92 smp_send_stop(); 89 smp_send_stop();
93#endif
94 90
95 atomic_notifier_call_chain(&panic_notifier_list, 0, buf); 91 atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
96 92
@@ -99,19 +95,21 @@ NORET_TYPE void panic(const char * fmt, ...)
99 95
100 if (panic_timeout > 0) { 96 if (panic_timeout > 0) {
101 /* 97 /*
102 * Delay timeout seconds before rebooting the machine. 98 * Delay timeout seconds before rebooting the machine.
103 * We can't use the "normal" timers since we just panicked.. 99 * We can't use the "normal" timers since we just panicked.
104 */ 100 */
105 printk(KERN_EMERG "Rebooting in %d seconds..",panic_timeout); 101 printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);
102
106 for (i = 0; i < panic_timeout*1000; ) { 103 for (i = 0; i < panic_timeout*1000; ) {
107 touch_nmi_watchdog(); 104 touch_nmi_watchdog();
108 i += panic_blink(i); 105 i += panic_blink(i);
109 mdelay(1); 106 mdelay(1);
110 i++; 107 i++;
111 } 108 }
112 /* This will not be a clean reboot, with everything 109 /*
113 * shutting down. But if there is a chance of 110 * This will not be a clean reboot, with everything
114 * rebooting the system it will be rebooted. 111 * shutting down. But if there is a chance of
112 * rebooting the system it will be rebooted.
115 */ 113 */
116 emergency_restart(); 114 emergency_restart();
117 } 115 }
@@ -124,38 +122,44 @@ NORET_TYPE void panic(const char * fmt, ...)
124 } 122 }
125#endif 123#endif
126#if defined(CONFIG_S390) 124#if defined(CONFIG_S390)
127 disabled_wait(caller); 125 {
126 unsigned long caller;
127
128 caller = (unsigned long)__builtin_return_address(0);
129 disabled_wait(caller);
130 }
128#endif 131#endif
129 local_irq_enable(); 132 local_irq_enable();
130 for (i = 0;;) { 133 for (i = 0; ; ) {
131 touch_softlockup_watchdog(); 134 touch_softlockup_watchdog();
132 i += panic_blink(i); 135 i += panic_blink(i);
133 mdelay(1); 136 mdelay(1);
134 i++; 137 i++;
135 } 138 }
139 bust_spinlocks(0);
136} 140}
137 141
138EXPORT_SYMBOL(panic); 142EXPORT_SYMBOL(panic);
139 143
140 144
141struct tnt { 145struct tnt {
142 u8 bit; 146 u8 bit;
143 char true; 147 char true;
144 char false; 148 char false;
145}; 149};
146 150
147static const struct tnt tnts[] = { 151static const struct tnt tnts[] = {
148 { TAINT_PROPRIETARY_MODULE, 'P', 'G' }, 152 { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
149 { TAINT_FORCED_MODULE, 'F', ' ' }, 153 { TAINT_FORCED_MODULE, 'F', ' ' },
150 { TAINT_UNSAFE_SMP, 'S', ' ' }, 154 { TAINT_UNSAFE_SMP, 'S', ' ' },
151 { TAINT_FORCED_RMMOD, 'R', ' ' }, 155 { TAINT_FORCED_RMMOD, 'R', ' ' },
152 { TAINT_MACHINE_CHECK, 'M', ' ' }, 156 { TAINT_MACHINE_CHECK, 'M', ' ' },
153 { TAINT_BAD_PAGE, 'B', ' ' }, 157 { TAINT_BAD_PAGE, 'B', ' ' },
154 { TAINT_USER, 'U', ' ' }, 158 { TAINT_USER, 'U', ' ' },
155 { TAINT_DIE, 'D', ' ' }, 159 { TAINT_DIE, 'D', ' ' },
156 { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' }, 160 { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
157 { TAINT_WARN, 'W', ' ' }, 161 { TAINT_WARN, 'W', ' ' },
158 { TAINT_CRAP, 'C', ' ' }, 162 { TAINT_CRAP, 'C', ' ' },
159}; 163};
160 164
161/** 165/**
@@ -192,7 +196,8 @@ const char *print_tainted(void)
192 *s = 0; 196 *s = 0;
193 } else 197 } else
194 snprintf(buf, sizeof(buf), "Not tainted"); 198 snprintf(buf, sizeof(buf), "Not tainted");
195 return(buf); 199
200 return buf;
196} 201}
197 202
198int test_taint(unsigned flag) 203int test_taint(unsigned flag)
@@ -208,7 +213,8 @@ unsigned long get_taint(void)
208 213
209void add_taint(unsigned flag) 214void add_taint(unsigned flag)
210{ 215{
211 debug_locks = 0; /* can't trust the integrity of the kernel anymore */ 216 /* can't trust the integrity of the kernel anymore: */
217 debug_locks = 0;
212 set_bit(flag, &tainted_mask); 218 set_bit(flag, &tainted_mask);
213} 219}
214EXPORT_SYMBOL(add_taint); 220EXPORT_SYMBOL(add_taint);
@@ -263,8 +269,8 @@ static void do_oops_enter_exit(void)
263} 269}
264 270
265/* 271/*
266 * Return true if the calling CPU is allowed to print oops-related info. This 272 * Return true if the calling CPU is allowed to print oops-related info.
267 * is a bit racy.. 273 * This is a bit racy..
268 */ 274 */
269int oops_may_print(void) 275int oops_may_print(void)
270{ 276{
@@ -273,20 +279,22 @@ int oops_may_print(void)
273 279
274/* 280/*
275 * Called when the architecture enters its oops handler, before it prints 281 * Called when the architecture enters its oops handler, before it prints
276 * anything. If this is the first CPU to oops, and it's oopsing the first time 282 * anything. If this is the first CPU to oops, and it's oopsing the first
277 * then let it proceed. 283 * time then let it proceed.
278 * 284 *
279 * This is all enabled by the pause_on_oops kernel boot option. We do all this 285 * This is all enabled by the pause_on_oops kernel boot option. We do all
280 * to ensure that oopses don't scroll off the screen. It has the side-effect 286 * this to ensure that oopses don't scroll off the screen. It has the
281 * of preventing later-oopsing CPUs from mucking up the display, too. 287 * side-effect of preventing later-oopsing CPUs from mucking up the display,
288 * too.
282 * 289 *
283 * It turns out that the CPU which is allowed to print ends up pausing for the 290 * It turns out that the CPU which is allowed to print ends up pausing for
284 * right duration, whereas all the other CPUs pause for twice as long: once in 291 * the right duration, whereas all the other CPUs pause for twice as long:
285 * oops_enter(), once in oops_exit(). 292 * once in oops_enter(), once in oops_exit().
286 */ 293 */
287void oops_enter(void) 294void oops_enter(void)
288{ 295{
289 debug_locks_off(); /* can't trust the integrity of the kernel anymore */ 296 /* can't trust the integrity of the kernel anymore: */
297 debug_locks_off();
290 do_oops_enter_exit(); 298 do_oops_enter_exit();
291} 299}
292 300
@@ -355,15 +363,18 @@ EXPORT_SYMBOL(warn_slowpath);
355#endif 363#endif
356 364
357#ifdef CONFIG_CC_STACKPROTECTOR 365#ifdef CONFIG_CC_STACKPROTECTOR
366
358/* 367/*
359 * Called when gcc's -fstack-protector feature is used, and 368 * Called when gcc's -fstack-protector feature is used, and
360 * gcc detects corruption of the on-stack canary value 369 * gcc detects corruption of the on-stack canary value
361 */ 370 */
362void __stack_chk_fail(void) 371void __stack_chk_fail(void)
363{ 372{
364 panic("stack-protector: Kernel stack is corrupted"); 373 panic("stack-protector: Kernel stack is corrupted in: %p\n",
374 __builtin_return_address(0));
365} 375}
366EXPORT_SYMBOL(__stack_chk_fail); 376EXPORT_SYMBOL(__stack_chk_fail);
377
367#endif 378#endif
368 379
369core_param(panic, panic_timeout, int, 0644); 380core_param(panic, panic_timeout, int, 0644);
diff --git a/kernel/pid.c b/kernel/pid.c
index 1b3586fe753a..b2e5f78fd281 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -403,6 +403,8 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
403{ 403{
404 struct pid *pid; 404 struct pid *pid;
405 rcu_read_lock(); 405 rcu_read_lock();
406 if (type != PIDTYPE_PID)
407 task = task->group_leader;
406 pid = get_pid(task->pids[type].pid); 408 pid = get_pid(task->pids[type].pid);
407 rcu_read_unlock(); 409 rcu_read_unlock();
408 return pid; 410 return pid;
@@ -450,11 +452,24 @@ pid_t pid_vnr(struct pid *pid)
450} 452}
451EXPORT_SYMBOL_GPL(pid_vnr); 453EXPORT_SYMBOL_GPL(pid_vnr);
452 454
453pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 455pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
456 struct pid_namespace *ns)
454{ 457{
455 return pid_nr_ns(task_pid(tsk), ns); 458 pid_t nr = 0;
459
460 rcu_read_lock();
461 if (!ns)
462 ns = current->nsproxy->pid_ns;
463 if (likely(pid_alive(task))) {
464 if (type != PIDTYPE_PID)
465 task = task->group_leader;
466 nr = pid_nr_ns(task->pids[type].pid, ns);
467 }
468 rcu_read_unlock();
469
470 return nr;
456} 471}
457EXPORT_SYMBOL(task_pid_nr_ns); 472EXPORT_SYMBOL(__task_pid_nr_ns);
458 473
459pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 474pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
460{ 475{
@@ -462,18 +477,6 @@ pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
462} 477}
463EXPORT_SYMBOL(task_tgid_nr_ns); 478EXPORT_SYMBOL(task_tgid_nr_ns);
464 479
465pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
466{
467 return pid_nr_ns(task_pgrp(tsk), ns);
468}
469EXPORT_SYMBOL(task_pgrp_nr_ns);
470
471pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
472{
473 return pid_nr_ns(task_session(tsk), ns);
474}
475EXPORT_SYMBOL(task_session_nr_ns);
476
477struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) 480struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
478{ 481{
479 return ns_of_pid(task_pid(tsk)); 482 return ns_of_pid(task_pid(tsk));
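
The removed per-type accessors do not disappear; with the pid_type parameter they collapse into one helper. Presumably the wrappers move to a header (include/linux/sched.h in this tree); a sketch of what the old entry points become:

        static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
                                            struct pid_namespace *ns)
        {
                return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
        }

        static inline pid_t task_session_nr_ns(struct task_struct *tsk,
                                               struct pid_namespace *ns)
        {
                return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
        }

The group_leader redirection and the pid_alive() check then happen in exactly one place, under rcu_read_lock(), instead of being duplicated per accessor.
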
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index fab8ea86fac3..2d1001b4858d 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -152,6 +152,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
152{ 152{
153 int nr; 153 int nr;
154 int rc; 154 int rc;
155 struct task_struct *task;
155 156
156 /* 157 /*
157 * The last thread in the cgroup-init thread group is terminating. 158 * The last thread in the cgroup-init thread group is terminating.
@@ -169,7 +170,19 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
169 read_lock(&tasklist_lock); 170 read_lock(&tasklist_lock);
170 nr = next_pidmap(pid_ns, 1); 171 nr = next_pidmap(pid_ns, 1);
171 while (nr > 0) { 172 while (nr > 0) {
172 kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr); 173 rcu_read_lock();
174
175 /*
176 * Use force_sig() since it clears SIGNAL_UNKILLABLE ensuring
177 * any nested-container's init processes don't ignore the
178 * signal
179 */
180 task = pid_task(find_vpid(nr), PIDTYPE_PID);
181 if (task)
182 force_sig(SIGKILL, task);
183
184 rcu_read_unlock();
185
173 nr = next_pidmap(pid_ns, nr); 186 nr = next_pidmap(pid_ns, nr);
174 } 187 }
175 read_unlock(&tasklist_lock); 188 read_unlock(&tasklist_lock);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e976e505648d..8e5d9a68b022 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
1370 if (task_cputime_expired(&group_sample, &sig->cputime_expires)) 1370 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1371 return 1; 1371 return 1;
1372 } 1372 }
1373 return 0; 1373
1374 return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
1374} 1375}
1375 1376
1376/* 1377/*
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 4a4a206b1979..5f21ab2bbcdf 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -22,6 +22,7 @@
22#include <linux/console.h> 22#include <linux/console.h>
23#include <linux/cpu.h> 23#include <linux/cpu.h>
24#include <linux/freezer.h> 24#include <linux/freezer.h>
25#include <asm/suspend.h>
25 26
26#include "power.h" 27#include "power.h"
27 28
@@ -214,7 +215,7 @@ static int create_image(int platform_mode)
214 return error; 215 return error;
215 216
216 device_pm_lock(); 217 device_pm_lock();
217 local_irq_disable(); 218
218 /* At this point, device_suspend() has been called, but *not* 219 /* At this point, device_suspend() has been called, but *not*
219 * device_power_down(). We *must* call device_power_down() now. 220 * device_power_down(). We *must* call device_power_down() now.
220 * Otherwise, drivers for some devices (e.g. interrupt controllers) 221 * Otherwise, drivers for some devices (e.g. interrupt controllers)
@@ -225,13 +226,25 @@ static int create_image(int platform_mode)
225 if (error) { 226 if (error) {
226 printk(KERN_ERR "PM: Some devices failed to power down, " 227 printk(KERN_ERR "PM: Some devices failed to power down, "
227 "aborting hibernation\n"); 228 "aborting hibernation\n");
228 goto Enable_irqs; 229 goto Unlock;
229 } 230 }
231
232 error = platform_pre_snapshot(platform_mode);
233 if (error || hibernation_test(TEST_PLATFORM))
234 goto Platform_finish;
235
236 error = disable_nonboot_cpus();
237 if (error || hibernation_test(TEST_CPUS)
238 || hibernation_testmode(HIBERNATION_TEST))
239 goto Enable_cpus;
240
241 local_irq_disable();
242
230 sysdev_suspend(PMSG_FREEZE); 243 sysdev_suspend(PMSG_FREEZE);
231 if (error) { 244 if (error) {
232 printk(KERN_ERR "PM: Some devices failed to power down, " 245 printk(KERN_ERR "PM: Some devices failed to power down, "
233 "aborting hibernation\n"); 246 "aborting hibernation\n");
234 goto Power_up_devices; 247 goto Enable_irqs;
235 } 248 }
236 249
237 if (hibernation_test(TEST_CORE)) 250 if (hibernation_test(TEST_CORE))
@@ -247,17 +260,28 @@ static int create_image(int platform_mode)
247 restore_processor_state(); 260 restore_processor_state();
248 if (!in_suspend) 261 if (!in_suspend)
249 platform_leave(platform_mode); 262 platform_leave(platform_mode);
263
250 Power_up: 264 Power_up:
251 sysdev_resume(); 265 sysdev_resume();
252 /* NOTE: device_power_up() is just a resume() for devices 266 /* NOTE: device_power_up() is just a resume() for devices
253 * that suspended with irqs off ... no overall powerup. 267 * that suspended with irqs off ... no overall powerup.
254 */ 268 */
255 Power_up_devices: 269
256 device_power_up(in_suspend ?
257 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
258 Enable_irqs: 270 Enable_irqs:
259 local_irq_enable(); 271 local_irq_enable();
272
273 Enable_cpus:
274 enable_nonboot_cpus();
275
276 Platform_finish:
277 platform_finish(platform_mode);
278
279 device_power_up(in_suspend ?
280 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
281
282 Unlock:
260 device_pm_unlock(); 283 device_pm_unlock();
284
261 return error; 285 return error;
262} 286}
263 287
@@ -265,7 +289,7 @@ static int create_image(int platform_mode)
265 * hibernation_snapshot - quiesce devices and create the hibernation 289 * hibernation_snapshot - quiesce devices and create the hibernation
266 * snapshot image. 290 * snapshot image.
267 * @platform_mode - if set, use the platform driver, if available, to 291 * @platform_mode - if set, use the platform driver, if available, to
268 * prepare the platform frimware for the power transition. 292 * prepare the platform firmware for the power transition.
269 * 293 *
270 * Must be called with pm_mutex held 294 * Must be called with pm_mutex held
271 */ 295 */
@@ -291,25 +315,9 @@ int hibernation_snapshot(int platform_mode)
291 if (hibernation_test(TEST_DEVICES)) 315 if (hibernation_test(TEST_DEVICES))
292 goto Recover_platform; 316 goto Recover_platform;
293 317
294 error = platform_pre_snapshot(platform_mode); 318 error = create_image(platform_mode);
295 if (error || hibernation_test(TEST_PLATFORM)) 319 /* Control returns here after successful restore */
296 goto Finish;
297
298 error = disable_nonboot_cpus();
299 if (!error) {
300 if (hibernation_test(TEST_CPUS))
301 goto Enable_cpus;
302
303 if (hibernation_testmode(HIBERNATION_TEST))
304 goto Enable_cpus;
305 320
306 error = create_image(platform_mode);
307 /* Control returns here after successful restore */
308 }
309 Enable_cpus:
310 enable_nonboot_cpus();
311 Finish:
312 platform_finish(platform_mode);
313 Resume_devices: 321 Resume_devices:
314 device_resume(in_suspend ? 322 device_resume(in_suspend ?
315 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 323 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
@@ -331,19 +339,33 @@ int hibernation_snapshot(int platform_mode)
331 * kernel. 339 * kernel.
332 */ 340 */
333 341
334static int resume_target_kernel(void) 342static int resume_target_kernel(bool platform_mode)
335{ 343{
336 int error; 344 int error;
337 345
338 device_pm_lock(); 346 device_pm_lock();
339 local_irq_disable(); 347
340 error = device_power_down(PMSG_QUIESCE); 348 error = device_power_down(PMSG_QUIESCE);
341 if (error) { 349 if (error) {
342 printk(KERN_ERR "PM: Some devices failed to power down, " 350 printk(KERN_ERR "PM: Some devices failed to power down, "
343 "aborting resume\n"); 351 "aborting resume\n");
344 goto Enable_irqs; 352 goto Unlock;
345 } 353 }
346 sysdev_suspend(PMSG_QUIESCE); 354
355 error = platform_pre_restore(platform_mode);
356 if (error)
357 goto Cleanup;
358
359 error = disable_nonboot_cpus();
360 if (error)
361 goto Enable_cpus;
362
363 local_irq_disable();
364
365 error = sysdev_suspend(PMSG_QUIESCE);
366 if (error)
367 goto Enable_irqs;
368
347 /* We'll ignore saved state, but this gets preempt count (etc) right */ 369 /* We'll ignore saved state, but this gets preempt count (etc) right */
348 save_processor_state(); 370 save_processor_state();
349 error = restore_highmem(); 371 error = restore_highmem();
@@ -366,11 +388,23 @@ static int resume_target_kernel(void)
366 swsusp_free(); 388 swsusp_free();
367 restore_processor_state(); 389 restore_processor_state();
368 touch_softlockup_watchdog(); 390 touch_softlockup_watchdog();
391
369 sysdev_resume(); 392 sysdev_resume();
370 device_power_up(PMSG_RECOVER); 393
371 Enable_irqs: 394 Enable_irqs:
372 local_irq_enable(); 395 local_irq_enable();
396
397 Enable_cpus:
398 enable_nonboot_cpus();
399
400 Cleanup:
401 platform_restore_cleanup(platform_mode);
402
403 device_power_up(PMSG_RECOVER);
404
405 Unlock:
373 device_pm_unlock(); 406 device_pm_unlock();
407
374 return error; 408 return error;
375} 409}
376 410
@@ -378,7 +412,7 @@ static int resume_target_kernel(void)
378 * hibernation_restore - quiesce devices and restore the hibernation 412 * hibernation_restore - quiesce devices and restore the hibernation
 379 * snapshot image. If successful, control returns in hibernation_snapshot() 413
380 * @platform_mode - if set, use the platform driver, if available, to 414 * @platform_mode - if set, use the platform driver, if available, to
381 * prepare the platform frimware for the transition. 415 * prepare the platform firmware for the transition.
382 * 416 *
383 * Must be called with pm_mutex held 417 * Must be called with pm_mutex held
384 */ 418 */
@@ -390,19 +424,10 @@ int hibernation_restore(int platform_mode)
390 pm_prepare_console(); 424 pm_prepare_console();
391 suspend_console(); 425 suspend_console();
392 error = device_suspend(PMSG_QUIESCE); 426 error = device_suspend(PMSG_QUIESCE);
393 if (error)
394 goto Finish;
395
396 error = platform_pre_restore(platform_mode);
397 if (!error) { 427 if (!error) {
398 error = disable_nonboot_cpus(); 428 error = resume_target_kernel(platform_mode);
399 if (!error) 429 device_resume(PMSG_RECOVER);
400 error = resume_target_kernel();
401 enable_nonboot_cpus();
402 } 430 }
403 platform_restore_cleanup(platform_mode);
404 device_resume(PMSG_RECOVER);
405 Finish:
406 resume_console(); 431 resume_console();
407 pm_restore_console(); 432 pm_restore_console();
408 return error; 433 return error;
@@ -438,38 +463,46 @@ int hibernation_platform_enter(void)
438 goto Resume_devices; 463 goto Resume_devices;
439 } 464 }
440 465
466 device_pm_lock();
467
468 error = device_power_down(PMSG_HIBERNATE);
469 if (error)
470 goto Unlock;
471
441 error = hibernation_ops->prepare(); 472 error = hibernation_ops->prepare();
442 if (error) 473 if (error)
 443 goto Resume_devices; 474 goto Platform_finish;
444 475
445 error = disable_nonboot_cpus(); 476 error = disable_nonboot_cpus();
446 if (error) 477 if (error)
 447 goto Finish; 478 goto Platform_finish;
448 479
449 device_pm_lock();
450 local_irq_disable(); 480 local_irq_disable();
451 error = device_power_down(PMSG_HIBERNATE); 481 sysdev_suspend(PMSG_HIBERNATE);
452 if (!error) { 482 hibernation_ops->enter();
453 sysdev_suspend(PMSG_HIBERNATE); 483 /* We should never get here */
454 hibernation_ops->enter(); 484 while (1);
455 /* We should never get here */
456 while (1);
457 }
458 local_irq_enable();
459 device_pm_unlock();
460 485
461 /* 486 /*
462 * We don't need to reenable the nonboot CPUs or resume consoles, since 487 * We don't need to reenable the nonboot CPUs or resume consoles, since
463 * the system is going to be halted anyway. 488 * the system is going to be halted anyway.
464 */ 489 */
 465 Finish: 490 Platform_finish:
466 hibernation_ops->finish(); 491 hibernation_ops->finish();
492
493 device_power_up(PMSG_RESTORE);
494
495 Unlock:
496 device_pm_unlock();
497
467 Resume_devices: 498 Resume_devices:
468 entering_platform_hibernation = false; 499 entering_platform_hibernation = false;
469 device_resume(PMSG_RESTORE); 500 device_resume(PMSG_RESTORE);
470 resume_console(); 501 resume_console();
502
471 Close: 503 Close:
472 hibernation_ops->end(); 504 hibernation_ops->end();
505
473 return error; 506 return error;
474} 507}
475 508
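
The restructured create_image()/resume_target_kernel() follow the kernel's usual ladder idiom: acquire resources top-down, and on failure jump to a label that unwinds only what was already done, in reverse order; the success path falls through the same unwind. A generic, self-contained sketch of the shape (all step names are invented stand-ins):

        /* Trivial stand-ins so the sketch compiles. */
        static int  power_down_devices(void) { return 0; } /* ~ device_power_down()     */
        static int  prepare_platform(void)   { return 0; } /* ~ platform_pre_snapshot() */
        static int  park_cpus(void)          { return 0; } /* ~ disable_nonboot_cpus()  */
        static void unpark_cpus(void)        { }           /* ~ enable_nonboot_cpus()   */
        static void finish_platform(void)    { }           /* ~ platform_finish()       */
        static void power_up_devices(void)   { }           /* ~ device_power_up()       */

        static int do_transition(void)
        {
                int error;

                error = power_down_devices();
                if (error)
                        goto unlock;

                error = prepare_platform();
                if (error)
                        goto platform_finish;

                error = park_cpus();
                if (error)
                        goto enable_cpus;

                /* irqs off, sysdev suspend, take the snapshot; resume
                 * then falls through the unwind path below. */

        enable_cpus:
                unpark_cpus();
        platform_finish:
                finish_platform();
                power_up_devices();
        unlock:
                return error;
        }

        int main(void)
        {
                return do_transition();
        }
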
diff --git a/kernel/power/main.c b/kernel/power/main.c
index c9632f841f64..f172f41858bb 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -287,17 +287,32 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
287 */ 287 */
288static int suspend_enter(suspend_state_t state) 288static int suspend_enter(suspend_state_t state)
289{ 289{
290 int error = 0; 290 int error;
291 291
292 device_pm_lock(); 292 device_pm_lock();
293 arch_suspend_disable_irqs();
294 BUG_ON(!irqs_disabled());
295 293
296 if ((error = device_power_down(PMSG_SUSPEND))) { 294 error = device_power_down(PMSG_SUSPEND);
295 if (error) {
297 printk(KERN_ERR "PM: Some devices failed to power down\n"); 296 printk(KERN_ERR "PM: Some devices failed to power down\n");
298 goto Done; 297 goto Done;
299 } 298 }
300 299
300 if (suspend_ops->prepare) {
301 error = suspend_ops->prepare();
302 if (error)
303 goto Power_up_devices;
304 }
305
306 if (suspend_test(TEST_PLATFORM))
 307 goto Platform_finish;
308
309 error = disable_nonboot_cpus();
310 if (error || suspend_test(TEST_CPUS))
311 goto Enable_cpus;
312
313 arch_suspend_disable_irqs();
314 BUG_ON(!irqs_disabled());
315
301 error = sysdev_suspend(PMSG_SUSPEND); 316 error = sysdev_suspend(PMSG_SUSPEND);
302 if (!error) { 317 if (!error) {
303 if (!suspend_test(TEST_CORE)) 318 if (!suspend_test(TEST_CORE))
@@ -305,11 +320,22 @@ static int suspend_enter(suspend_state_t state)
305 sysdev_resume(); 320 sysdev_resume();
306 } 321 }
307 322
308 device_power_up(PMSG_RESUME);
309 Done:
310 arch_suspend_enable_irqs(); 323 arch_suspend_enable_irqs();
311 BUG_ON(irqs_disabled()); 324 BUG_ON(irqs_disabled());
325
326 Enable_cpus:
327 enable_nonboot_cpus();
328
 329 Platform_finish:
330 if (suspend_ops->finish)
331 suspend_ops->finish();
332
333 Power_up_devices:
334 device_power_up(PMSG_RESUME);
335
336 Done:
312 device_pm_unlock(); 337 device_pm_unlock();
338
313 return error; 339 return error;
314} 340}
315 341
@@ -341,23 +367,8 @@ int suspend_devices_and_enter(suspend_state_t state)
341 if (suspend_test(TEST_DEVICES)) 367 if (suspend_test(TEST_DEVICES))
342 goto Recover_platform; 368 goto Recover_platform;
343 369
344 if (suspend_ops->prepare) { 370 suspend_enter(state);
345 error = suspend_ops->prepare();
346 if (error)
347 goto Resume_devices;
348 }
349
350 if (suspend_test(TEST_PLATFORM))
351 goto Finish;
352
353 error = disable_nonboot_cpus();
354 if (!error && !suspend_test(TEST_CPUS))
355 suspend_enter(state);
356 371
357 enable_nonboot_cpus();
358 Finish:
359 if (suspend_ops->finish)
360 suspend_ops->finish();
361 Resume_devices: 372 Resume_devices:
362 suspend_test_start(); 373 suspend_test_start();
363 device_resume(PMSG_RESUME); 374 device_resume(PMSG_RESUME);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f5fc2d7680f2..33e2e4a819f9 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -321,13 +321,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
321 321
322 INIT_LIST_HEAD(list); 322 INIT_LIST_HEAD(list);
323 323
324 for_each_zone(zone) { 324 for_each_populated_zone(zone) {
325 unsigned long zone_start, zone_end; 325 unsigned long zone_start, zone_end;
326 struct mem_extent *ext, *cur, *aux; 326 struct mem_extent *ext, *cur, *aux;
327 327
328 if (!populated_zone(zone))
329 continue;
330
331 zone_start = zone->zone_start_pfn; 328 zone_start = zone->zone_start_pfn;
332 zone_end = zone->zone_start_pfn + zone->spanned_pages; 329 zone_end = zone->zone_start_pfn + zone->spanned_pages;
333 330
@@ -804,8 +801,8 @@ static unsigned int count_free_highmem_pages(void)
804 struct zone *zone; 801 struct zone *zone;
805 unsigned int cnt = 0; 802 unsigned int cnt = 0;
806 803
807 for_each_zone(zone) 804 for_each_populated_zone(zone)
808 if (populated_zone(zone) && is_highmem(zone)) 805 if (is_highmem(zone))
809 cnt += zone_page_state(zone, NR_FREE_PAGES); 806 cnt += zone_page_state(zone, NR_FREE_PAGES);
810 807
811 return cnt; 808 return cnt;
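
for_each_populated_zone() folds the populated_zone() test into the iterator, so every walker loses its explicit skip. Its definition in include/linux/mmzone.h of this vintage is roughly (quoted from memory, so treat as approximate):

        #define for_each_populated_zone(zone)                   \
                for (zone = (first_online_pgdat())->node_zones; \
                     zone;                                      \
                     zone = next_zone(zone))                    \
                        if (!populated_zone(zone))              \
                                ; /* do nothing */              \
                        else

The dangling-else trick keeps the macro usable as a normal statement head: the loop body the caller writes binds to the else branch, so unpopulated zones fall through the empty if arm.
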
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index a92c91451559..78c35047586d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -51,6 +51,7 @@
51#include <linux/highmem.h> 51#include <linux/highmem.h>
52#include <linux/time.h> 52#include <linux/time.h>
53#include <linux/rbtree.h> 53#include <linux/rbtree.h>
54#include <linux/io.h>
54 55
55#include "power.h" 56#include "power.h"
56 57
@@ -229,17 +230,16 @@ int swsusp_shrink_memory(void)
229 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES; 230 size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES;
230 tmp = size; 231 tmp = size;
231 size += highmem_size; 232 size += highmem_size;
232 for_each_zone (zone) 233 for_each_populated_zone(zone) {
233 if (populated_zone(zone)) { 234 tmp += snapshot_additional_pages(zone);
234 tmp += snapshot_additional_pages(zone); 235 if (is_highmem(zone)) {
235 if (is_highmem(zone)) { 236 highmem_size -=
236 highmem_size -=
237 zone_page_state(zone, NR_FREE_PAGES); 237 zone_page_state(zone, NR_FREE_PAGES);
238 } else { 238 } else {
239 tmp -= zone_page_state(zone, NR_FREE_PAGES); 239 tmp -= zone_page_state(zone, NR_FREE_PAGES);
240 tmp += zone->lowmem_reserve[ZONE_NORMAL]; 240 tmp += zone->lowmem_reserve[ZONE_NORMAL];
241 }
242 } 241 }
242 }
243 243
244 if (highmem_size < 0) 244 if (highmem_size < 0)
245 highmem_size = 0; 245 highmem_size = 0;
diff --git a/kernel/printk.c b/kernel/printk.c
index e3602d0755b0..a5f61a9acedb 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -32,6 +32,7 @@
32#include <linux/security.h> 32#include <linux/security.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/kexec.h>
35 36
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37 38
@@ -135,6 +136,24 @@ static char *log_buf = __log_buf;
135static int log_buf_len = __LOG_BUF_LEN; 136static int log_buf_len = __LOG_BUF_LEN;
136static unsigned logged_chars; /* Number of chars produced since last read+clear operation */ 137static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
137 138
139#ifdef CONFIG_KEXEC
140/*
141 * This appends the listed symbols to /proc/vmcoreinfo
142 *
 143 * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile, to
144 * obtain access to symbols that are otherwise very difficult to locate. These
145 * symbols are specifically used so that utilities can access and extract the
146 * dmesg log from a vmcore file after a crash.
147 */
148void log_buf_kexec_setup(void)
149{
150 VMCOREINFO_SYMBOL(log_buf);
151 VMCOREINFO_SYMBOL(log_end);
152 VMCOREINFO_SYMBOL(log_buf_len);
153 VMCOREINFO_SYMBOL(logged_chars);
154}
155#endif
156
138static int __init log_buf_len_setup(char *str) 157static int __init log_buf_len_setup(char *str)
139{ 158{
140 unsigned size = memparse(str, &str); 159 unsigned size = memparse(str, &str);
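For context on why those four symbols matter: with log_buf, log_end, log_buf_len and logged_chars resolvable through /proc/vmcoreinfo, a dump tool can rebuild dmesg from a crashed kernel's memory image. The sketch below shows the circular-buffer arithmetic such a tool would perform on this era's log buffer; dump_dmesg() and the simulated buffer are illustrative, not makedumpfile's actual code.

#include <stdio.h>

/*
 * Rebuild dmesg from the four exported values. `buf` stands in for
 * memory read out of the vmcore at the address of log_buf; the real
 * log_buf_len is a power of two, which the masking below relies on.
 */
static void dump_dmesg(const char *buf, unsigned long log_buf_len,
		       unsigned long log_end, unsigned long logged_chars)
{
	unsigned long i, start;

	if (logged_chars > log_buf_len)
		logged_chars = log_buf_len;	/* buffer has wrapped */
	start = log_end - logged_chars;

	for (i = 0; i < logged_chars; i++)
		putchar(buf[(start + i) & (log_buf_len - 1)]);
}

int main(void)
{
	char log[16];
	unsigned long end = 0;
	const char *p, *msg = "hello, vmcore world\n";

	/* Simulate the kernel appending past the end of the buffer. */
	for (p = msg; *p; p++)
		log[end++ & (sizeof(log) - 1)] = *p;

	dump_dmesg(log, sizeof(log), end, end);	/* prints the last 16 bytes */
	return 0;
}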
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index c9cf48b21f05..5105f5a6a2ce 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -60,11 +60,15 @@ static void ptrace_untrace(struct task_struct *child)
60{ 60{
61 spin_lock(&child->sighand->siglock); 61 spin_lock(&child->sighand->siglock);
62 if (task_is_traced(child)) { 62 if (task_is_traced(child)) {
63 if (child->signal->flags & SIGNAL_STOP_STOPPED) { 63 /*
64 * If the group stop is completed or in progress,
65 * this thread was already counted as stopped.
66 */
67 if (child->signal->flags & SIGNAL_STOP_STOPPED ||
68 child->signal->group_stop_count)
64 __set_task_state(child, TASK_STOPPED); 69 __set_task_state(child, TASK_STOPPED);
65 } else { 70 else
66 signal_wake_up(child, 1); 71 signal_wake_up(child, 1);
67 }
68 } 72 }
69 spin_unlock(&child->sighand->siglock); 73 spin_unlock(&child->sighand->siglock);
70} 74}
@@ -235,18 +239,58 @@ out:
235 return retval; 239 return retval;
236} 240}
237 241
238static inline void __ptrace_detach(struct task_struct *child, unsigned int data) 242/*
 243 * Called with irqs disabled, returns true if children should reap themselves.
244 */
245static int ignoring_children(struct sighand_struct *sigh)
239{ 246{
240 child->exit_code = data; 247 int ret;
241 /* .. re-parent .. */ 248 spin_lock(&sigh->siglock);
242 __ptrace_unlink(child); 249 ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
243 /* .. and wake it up. */ 250 (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
244 if (child->exit_state != EXIT_ZOMBIE) 251 spin_unlock(&sigh->siglock);
245 wake_up_process(child); 252 return ret;
253}
254
255/*
256 * Called with tasklist_lock held for writing.
257 * Unlink a traced task, and clean it up if it was a traced zombie.
258 * Return true if it needs to be reaped with release_task().
259 * (We can't call release_task() here because we already hold tasklist_lock.)
260 *
 261 * If it's a zombie, our attachment prevented normal parent notification
262 * or self-reaping. Do notification now if it would have happened earlier.
263 * If it should reap itself, return true.
264 *
265 * If it's our own child, there is no notification to do.
266 * But if our normal children self-reap, then this child
267 * was prevented by ptrace and we must reap it now.
268 */
269static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
270{
271 __ptrace_unlink(p);
272
273 if (p->exit_state == EXIT_ZOMBIE) {
274 if (!task_detached(p) && thread_group_empty(p)) {
275 if (!same_thread_group(p->real_parent, tracer))
276 do_notify_parent(p, p->exit_signal);
277 else if (ignoring_children(tracer->sighand))
278 p->exit_signal = -1;
279 }
280 if (task_detached(p)) {
281 /* Mark it as in the process of being reaped. */
282 p->exit_state = EXIT_DEAD;
283 return true;
284 }
285 }
286
287 return false;
246} 288}
247 289
248int ptrace_detach(struct task_struct *child, unsigned int data) 290int ptrace_detach(struct task_struct *child, unsigned int data)
249{ 291{
292 bool dead = false;
293
250 if (!valid_signal(data)) 294 if (!valid_signal(data))
251 return -EIO; 295 return -EIO;
252 296
@@ -255,14 +299,45 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
255 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); 299 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
256 300
257 write_lock_irq(&tasklist_lock); 301 write_lock_irq(&tasklist_lock);
258 /* protect against de_thread()->release_task() */ 302 /*
 259 if (child->ptrace) 303 * This child may already have been killed. Make sure de_thread() or
260 __ptrace_detach(child, data); 304 * our sub-thread doing do_wait() didn't do release_task() yet.
305 */
306 if (child->ptrace) {
307 child->exit_code = data;
308 dead = __ptrace_detach(current, child);
309 }
261 write_unlock_irq(&tasklist_lock); 310 write_unlock_irq(&tasklist_lock);
262 311
312 if (unlikely(dead))
313 release_task(child);
314
263 return 0; 315 return 0;
264} 316}
265 317
318/*
319 * Detach all tasks we were using ptrace on.
320 */
321void exit_ptrace(struct task_struct *tracer)
322{
323 struct task_struct *p, *n;
324 LIST_HEAD(ptrace_dead);
325
326 write_lock_irq(&tasklist_lock);
327 list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
328 if (__ptrace_detach(tracer, p))
329 list_add(&p->ptrace_entry, &ptrace_dead);
330 }
331 write_unlock_irq(&tasklist_lock);
332
333 BUG_ON(!list_empty(&tracer->ptraced));
334
335 list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
336 list_del_init(&p->ptrace_entry);
337 release_task(p);
338 }
339}
340
266int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) 341int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
267{ 342{
268 int copied = 0; 343 int copied = 0;
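exit_ptrace() above is a textbook two-phase teardown: decide what is dead while holding tasklist_lock, park those tasks on a private list, and call release_task() only after the lock is dropped, since release_task() itself needs tasklist_lock. A self-contained sketch of the same shape, with a pthread mutex standing in for tasklist_lock and toy nodes for tracees:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tracee {
	int pid;
	int dead;			/* decided under the lock */
	struct tracee *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tracee *traced;		/* protected by list_lock */

/* Must not be called with list_lock held (mirrors release_task()). */
static void release(struct tracee *t)
{
	printf("reaping pid %d\n", t->pid);
	free(t);
}

static void exit_trace_all(void)
{
	struct tracee *t, *next, *dead_list = NULL;

	pthread_mutex_lock(&list_lock);
	/* Phase 1: unlink everything; collect the dead on a private list. */
	for (t = traced; t; t = next) {
		next = t->next;
		if (t->dead) {
			t->next = dead_list;
			dead_list = t;
		}
		/* live tracees are simply detached; they reap themselves */
	}
	traced = NULL;
	pthread_mutex_unlock(&list_lock);

	/* Phase 2: the lock is dropped, so release() is now safe to call. */
	for (t = dead_list; t; t = next) {
		next = t->next;
		release(t);
	}
}

static void trace(int pid, int dead)
{
	struct tracee *t = malloc(sizeof(*t));

	t->pid = pid;
	t->dead = dead;
	t->next = traced;
	traced = t;
}

int main(void)
{
	trace(101, 1);
	trace(102, 0);	/* stays alive: detached, not reaped (leaks in this toy) */
	trace(103, 1);
	exit_trace_all();
	return 0;
}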
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index cae8a059cf47..2c7b8457d0d2 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,6 +122,8 @@ static void rcu_barrier_func(void *type)
122 } 122 }
123} 123}
124 124
125static inline void wait_migrated_callbacks(void);
126
125/* 127/*
126 * Orchestrate the specified type of RCU barrier, waiting for all 128 * Orchestrate the specified type of RCU barrier, waiting for all
127 * RCU callbacks of the specified type to complete. 129 * RCU callbacks of the specified type to complete.
@@ -147,6 +149,7 @@ static void _rcu_barrier(enum rcu_barrier type)
147 complete(&rcu_barrier_completion); 149 complete(&rcu_barrier_completion);
148 wait_for_completion(&rcu_barrier_completion); 150 wait_for_completion(&rcu_barrier_completion);
149 mutex_unlock(&rcu_barrier_mutex); 151 mutex_unlock(&rcu_barrier_mutex);
152 wait_migrated_callbacks();
150} 153}
151 154
152/** 155/**
@@ -176,9 +179,50 @@ void rcu_barrier_sched(void)
176} 179}
177EXPORT_SYMBOL_GPL(rcu_barrier_sched); 180EXPORT_SYMBOL_GPL(rcu_barrier_sched);
178 181
182static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
183static struct rcu_head rcu_migrate_head[3];
184static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
185
186static void rcu_migrate_callback(struct rcu_head *notused)
187{
188 if (atomic_dec_and_test(&rcu_migrate_type_count))
189 wake_up(&rcu_migrate_wq);
190}
191
192static inline void wait_migrated_callbacks(void)
193{
194 wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
195}
196
197static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
198 unsigned long action, void *hcpu)
199{
200 if (action == CPU_DYING) {
201 /*
202 * preempt_disable() in on_each_cpu() prevents stop_machine(),
203 * so when "on_each_cpu(rcu_barrier_func, (void *)type, 1);"
204 * returns, all online cpus have queued rcu_barrier_func(),
 205 * and the dead cpu (if it exists) queues rcu_migrate_callback()s.
206 *
207 * These callbacks ensure _rcu_barrier() waits for all
208 * RCU callbacks of the specified type to complete.
209 */
210 atomic_set(&rcu_migrate_type_count, 3);
211 call_rcu_bh(rcu_migrate_head, rcu_migrate_callback);
212 call_rcu_sched(rcu_migrate_head + 1, rcu_migrate_callback);
213 call_rcu(rcu_migrate_head + 2, rcu_migrate_callback);
214 } else if (action == CPU_POST_DEAD) {
215 /* rcu_migrate_head is protected by cpu_add_remove_lock */
216 wait_migrated_callbacks();
217 }
218
219 return NOTIFY_OK;
220}
221
179void __init rcu_init(void) 222void __init rcu_init(void)
180{ 223{
181 __rcu_init(); 224 __rcu_init();
225 hotcpu_notifier(rcu_barrier_cpu_hotplug, 0);
182} 226}
183 227
184void rcu_scheduler_starting(void) 228void rcu_scheduler_starting(void)
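The CPU_DYING leg above arms three callbacks, one per RCU flavor, against a shared countdown, and wait_migrated_callbacks() blocks until all three have run. The standalone sketch below reproduces that count-to-zero rendezvous with pthreads standing in for the RCU callback contexts; it models only the synchronization shape, not RCU itself.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t all_done = PTHREAD_COND_INITIALIZER;
static int migrate_type_count;

/* Stands in for rcu_migrate_callback(): one invocation per flavor. */
static void *migrate_callback(void *arg)
{
	pthread_mutex_lock(&lock);
	if (--migrate_type_count == 0)
		pthread_cond_signal(&all_done);	/* wake_up(&rcu_migrate_wq) */
	pthread_mutex_unlock(&lock);
	return arg;
}

/* Stands in for wait_migrated_callbacks(). */
static void wait_migrated_callbacks(void)
{
	pthread_mutex_lock(&lock);
	while (migrate_type_count)
		pthread_cond_wait(&all_done, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t cb[3];
	int i;

	migrate_type_count = 3;		/* atomic_set(..., 3) in the patch */
	for (i = 0; i < 3; i++)
		pthread_create(&cb[i], NULL, migrate_callback, NULL);

	wait_migrated_callbacks();
	printf("all per-flavor callbacks completed\n");

	for (i = 0; i < 3; i++)
		pthread_join(cb[i], NULL);
	return 0;
}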
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 7c4142a79f0a..9b4a975a4b4a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -126,6 +126,7 @@ static atomic_t n_rcu_torture_mberror;
126static atomic_t n_rcu_torture_error; 126static atomic_t n_rcu_torture_error;
127static long n_rcu_torture_timers = 0; 127static long n_rcu_torture_timers = 0;
128static struct list_head rcu_torture_removed; 128static struct list_head rcu_torture_removed;
129static cpumask_var_t shuffle_tmp_mask;
129 130
130static int stutter_pause_test = 0; 131static int stutter_pause_test = 0;
131 132
@@ -889,10 +890,9 @@ static int rcu_idle_cpu; /* Force all torture tasks off this CPU */
889 */ 890 */
890static void rcu_torture_shuffle_tasks(void) 891static void rcu_torture_shuffle_tasks(void)
891{ 892{
892 cpumask_t tmp_mask;
893 int i; 893 int i;
894 894
895 cpus_setall(tmp_mask); 895 cpumask_setall(shuffle_tmp_mask);
896 get_online_cpus(); 896 get_online_cpus();
897 897
898 /* No point in shuffling if there is only one online CPU (ex: UP) */ 898 /* No point in shuffling if there is only one online CPU (ex: UP) */
@@ -902,29 +902,29 @@ static void rcu_torture_shuffle_tasks(void)
902 } 902 }
903 903
904 if (rcu_idle_cpu != -1) 904 if (rcu_idle_cpu != -1)
905 cpu_clear(rcu_idle_cpu, tmp_mask); 905 cpumask_clear_cpu(rcu_idle_cpu, shuffle_tmp_mask);
906 906
907 set_cpus_allowed_ptr(current, &tmp_mask); 907 set_cpus_allowed_ptr(current, shuffle_tmp_mask);
908 908
909 if (reader_tasks) { 909 if (reader_tasks) {
910 for (i = 0; i < nrealreaders; i++) 910 for (i = 0; i < nrealreaders; i++)
911 if (reader_tasks[i]) 911 if (reader_tasks[i])
912 set_cpus_allowed_ptr(reader_tasks[i], 912 set_cpus_allowed_ptr(reader_tasks[i],
913 &tmp_mask); 913 shuffle_tmp_mask);
914 } 914 }
915 915
916 if (fakewriter_tasks) { 916 if (fakewriter_tasks) {
917 for (i = 0; i < nfakewriters; i++) 917 for (i = 0; i < nfakewriters; i++)
918 if (fakewriter_tasks[i]) 918 if (fakewriter_tasks[i])
919 set_cpus_allowed_ptr(fakewriter_tasks[i], 919 set_cpus_allowed_ptr(fakewriter_tasks[i],
920 &tmp_mask); 920 shuffle_tmp_mask);
921 } 921 }
922 922
923 if (writer_task) 923 if (writer_task)
924 set_cpus_allowed_ptr(writer_task, &tmp_mask); 924 set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
925 925
926 if (stats_task) 926 if (stats_task)
927 set_cpus_allowed_ptr(stats_task, &tmp_mask); 927 set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
928 928
929 if (rcu_idle_cpu == -1) 929 if (rcu_idle_cpu == -1)
930 rcu_idle_cpu = num_online_cpus() - 1; 930 rcu_idle_cpu = num_online_cpus() - 1;
@@ -1012,6 +1012,7 @@ rcu_torture_cleanup(void)
1012 if (shuffler_task) { 1012 if (shuffler_task) {
1013 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); 1013 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
1014 kthread_stop(shuffler_task); 1014 kthread_stop(shuffler_task);
1015 free_cpumask_var(shuffle_tmp_mask);
1015 } 1016 }
1016 shuffler_task = NULL; 1017 shuffler_task = NULL;
1017 1018
@@ -1190,10 +1191,18 @@ rcu_torture_init(void)
1190 } 1191 }
1191 if (test_no_idle_hz) { 1192 if (test_no_idle_hz) {
1192 rcu_idle_cpu = num_online_cpus() - 1; 1193 rcu_idle_cpu = num_online_cpus() - 1;
1194
1195 if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
1196 firsterr = -ENOMEM;
1197 VERBOSE_PRINTK_ERRSTRING("Failed to alloc mask");
1198 goto unwind;
1199 }
1200
1193 /* Create the shuffler thread */ 1201 /* Create the shuffler thread */
1194 shuffler_task = kthread_run(rcu_torture_shuffle, NULL, 1202 shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
1195 "rcu_torture_shuffle"); 1203 "rcu_torture_shuffle");
1196 if (IS_ERR(shuffler_task)) { 1204 if (IS_ERR(shuffler_task)) {
1205 free_cpumask_var(shuffle_tmp_mask);
1197 firsterr = PTR_ERR(shuffler_task); 1206 firsterr = PTR_ERR(shuffler_task);
1198 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler"); 1207 VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
1199 shuffler_task = NULL; 1208 shuffler_task = NULL;
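The rcutorture change is about stack footprint: cpumask_t is NR_CPUS bits wide, so an on-stack tmp_mask can get dangerous on big-iron configs, and the patch moves it to a cpumask_var_t allocated once at init and freed on every teardown path. A userspace analogue of that opaque allocate/use/free discipline (toy bitmask API, not the kernel's cpumask interface):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NCPUS 4096			/* big enough to hurt as an on-stack array */

typedef unsigned char *mask_var_t;	/* opaque, heap-backed mask */

static int alloc_mask_var(mask_var_t *mask)
{
	*mask = calloc((NCPUS + 7) / 8, 1);
	return *mask != NULL;
}

static void mask_setall(mask_var_t mask)
{
	memset(mask, 0xff, (NCPUS + 7) / 8);
}

static void mask_clear_cpu(int cpu, mask_var_t mask)
{
	mask[cpu / 8] &= (unsigned char)~(1u << (cpu % 8));
}

static void free_mask_var(mask_var_t mask)
{
	free(mask);
}

int main(void)
{
	mask_var_t shuffle_tmp_mask;

	if (!alloc_mask_var(&shuffle_tmp_mask))		/* once, at init */
		return 1;

	mask_setall(shuffle_tmp_mask);			/* reused per shuffle */
	mask_clear_cpu(3, shuffle_tmp_mask);
	printf("cpu 3 set: %d\n", (shuffle_tmp_mask[0] >> 3) & 1);

	free_mask_var(shuffle_tmp_mask);		/* once, at teardown */
	return 0;
}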
diff --git a/kernel/relay.c b/kernel/relay.c
index 9d79b7854fa6..e92db8c06acf 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -750,7 +750,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
750 * from the scheduler (trying to re-grab 750 * from the scheduler (trying to re-grab
751 * rq->lock), so defer it. 751 * rq->lock), so defer it.
752 */ 752 */
753 __mod_timer(&buf->timer, jiffies + 1); 753 mod_timer(&buf->timer, jiffies + 1);
754 } 754 }
755 755
756 old = buf->data; 756 old = buf->data;
@@ -797,13 +797,15 @@ void relay_subbufs_consumed(struct rchan *chan,
797 if (!chan) 797 if (!chan)
798 return; 798 return;
799 799
800 if (cpu >= NR_CPUS || !chan->buf[cpu]) 800 if (cpu >= NR_CPUS || !chan->buf[cpu] ||
801 subbufs_consumed > chan->n_subbufs)
801 return; 802 return;
802 803
803 buf = chan->buf[cpu]; 804 buf = chan->buf[cpu];
804 buf->subbufs_consumed += subbufs_consumed; 805 if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed)
805 if (buf->subbufs_consumed > buf->subbufs_produced)
806 buf->subbufs_consumed = buf->subbufs_produced; 806 buf->subbufs_consumed = buf->subbufs_produced;
807 else
808 buf->subbufs_consumed += subbufs_consumed;
807} 809}
808EXPORT_SYMBOL_GPL(relay_subbufs_consumed); 810EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
809 811
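The second relay hunk is an unsigned-arithmetic fix: the old code added subbufs_consumed first and clamped second, so a large bogus value could wrap past subbufs_produced and defeat the clamp. Comparing against the remaining headroom before adding avoids the wrap entirely, as this standalone demonstration shows (toy counters; the variable names just echo the patch):

#include <stdio.h>

int main(void)
{
	unsigned long produced = 10, consumed = 8;
	unsigned long n = (unsigned long)-4;	/* huge bogus value from a caller */
	unsigned long old_val, new_val;

	/* Old logic: add first, clamp second -- the addition wraps. */
	old_val = consumed + n;			/* wraps around to 4 */
	if (old_val > produced)
		old_val = produced;
	printf("old logic: consumed = %lu (wrap defeated the clamp)\n", old_val);

	/* New logic: test against the remaining headroom before adding. */
	new_val = consumed;
	if (n > produced - consumed)		/* headroom is only 2 */
		new_val = produced;		/* clamp to produced */
	else
		new_val += n;
	printf("new logic: consumed = %lu\n", new_val);
	return 0;
}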
diff --git a/kernel/sched.c b/kernel/sched.c
index 8e2558c2ba67..2325db2be31b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -331,6 +331,13 @@ static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
331 */ 331 */
332static DEFINE_SPINLOCK(task_group_lock); 332static DEFINE_SPINLOCK(task_group_lock);
333 333
334#ifdef CONFIG_SMP
335static int root_task_group_empty(void)
336{
337 return list_empty(&root_task_group.children);
338}
339#endif
340
334#ifdef CONFIG_FAIR_GROUP_SCHED 341#ifdef CONFIG_FAIR_GROUP_SCHED
335#ifdef CONFIG_USER_SCHED 342#ifdef CONFIG_USER_SCHED
336# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD) 343# define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
@@ -391,6 +398,13 @@ static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
391 398
392#else 399#else
393 400
401#ifdef CONFIG_SMP
402static int root_task_group_empty(void)
403{
404 return 1;
405}
406#endif
407
394static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { } 408static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
395static inline struct task_group *task_group(struct task_struct *p) 409static inline struct task_group *task_group(struct task_struct *p)
396{ 410{
@@ -467,11 +481,17 @@ struct rt_rq {
467 struct rt_prio_array active; 481 struct rt_prio_array active;
468 unsigned long rt_nr_running; 482 unsigned long rt_nr_running;
469#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 483#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
470 int highest_prio; /* highest queued rt task prio */ 484 struct {
485 int curr; /* highest queued rt task prio */
486#ifdef CONFIG_SMP
487 int next; /* next highest */
488#endif
489 } highest_prio;
471#endif 490#endif
472#ifdef CONFIG_SMP 491#ifdef CONFIG_SMP
473 unsigned long rt_nr_migratory; 492 unsigned long rt_nr_migratory;
474 int overloaded; 493 int overloaded;
494 struct plist_head pushable_tasks;
475#endif 495#endif
476 int rt_throttled; 496 int rt_throttled;
477 u64 rt_time; 497 u64 rt_time;
@@ -549,7 +569,6 @@ struct rq {
549 unsigned long nr_running; 569 unsigned long nr_running;
550 #define CPU_LOAD_IDX_MAX 5 570 #define CPU_LOAD_IDX_MAX 5
551 unsigned long cpu_load[CPU_LOAD_IDX_MAX]; 571 unsigned long cpu_load[CPU_LOAD_IDX_MAX];
552 unsigned char idle_at_tick;
553#ifdef CONFIG_NO_HZ 572#ifdef CONFIG_NO_HZ
554 unsigned long last_tick_seen; 573 unsigned long last_tick_seen;
555 unsigned char in_nohz_recently; 574 unsigned char in_nohz_recently;
@@ -590,6 +609,7 @@ struct rq {
590 struct root_domain *rd; 609 struct root_domain *rd;
591 struct sched_domain *sd; 610 struct sched_domain *sd;
592 611
612 unsigned char idle_at_tick;
593 /* For active balancing */ 613 /* For active balancing */
594 int active_balance; 614 int active_balance;
595 int push_cpu; 615 int push_cpu;
@@ -618,9 +638,6 @@ struct rq {
618 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ 638 /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */
619 639
620 /* sys_sched_yield() stats */ 640 /* sys_sched_yield() stats */
621 unsigned int yld_exp_empty;
622 unsigned int yld_act_empty;
623 unsigned int yld_both_empty;
624 unsigned int yld_count; 641 unsigned int yld_count;
625 642
626 /* schedule() stats */ 643 /* schedule() stats */
@@ -1093,7 +1110,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
1093 if (rq == this_rq()) { 1110 if (rq == this_rq()) {
1094 hrtimer_restart(timer); 1111 hrtimer_restart(timer);
1095 } else if (!rq->hrtick_csd_pending) { 1112 } else if (!rq->hrtick_csd_pending) {
1096 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd); 1113 __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
1097 rq->hrtick_csd_pending = 1; 1114 rq->hrtick_csd_pending = 1;
1098 } 1115 }
1099} 1116}
@@ -1183,10 +1200,10 @@ static void resched_task(struct task_struct *p)
1183 1200
1184 assert_spin_locked(&task_rq(p)->lock); 1201 assert_spin_locked(&task_rq(p)->lock);
1185 1202
1186 if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED))) 1203 if (test_tsk_need_resched(p))
1187 return; 1204 return;
1188 1205
1189 set_tsk_thread_flag(p, TIF_NEED_RESCHED); 1206 set_tsk_need_resched(p);
1190 1207
1191 cpu = task_cpu(p); 1208 cpu = task_cpu(p);
1192 if (cpu == smp_processor_id()) 1209 if (cpu == smp_processor_id())
@@ -1242,7 +1259,7 @@ void wake_up_idle_cpu(int cpu)
1242 * lockless. The worst case is that the other CPU runs the 1259 * lockless. The worst case is that the other CPU runs the
1243 * idle task through an additional NOOP schedule() 1260 * idle task through an additional NOOP schedule()
1244 */ 1261 */
1245 set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED); 1262 set_tsk_need_resched(rq->idle);
1246 1263
1247 /* NEED_RESCHED must be visible before we test polling */ 1264 /* NEED_RESCHED must be visible before we test polling */
1248 smp_mb(); 1265 smp_mb();
@@ -1610,21 +1627,42 @@ static inline void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1610 1627
1611#endif 1628#endif
1612 1629
1630#ifdef CONFIG_PREEMPT
1631
1613/* 1632/*
1614 * double_lock_balance - lock the busiest runqueue, this_rq is locked already. 1633 * fair double_lock_balance: Safely acquires both rq->locks in a fair
1634 * way at the expense of forcing extra atomic operations in all
 1635 * invocations. This ensures that the double_lock is acquired using the
1636 * same underlying policy as the spinlock_t on this architecture, which
1637 * reduces latency compared to the unfair variant below. However, it
1638 * also adds more overhead and therefore may reduce throughput.
1615 */ 1639 */
1616static int double_lock_balance(struct rq *this_rq, struct rq *busiest) 1640static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(this_rq->lock)
1642 __acquires(busiest->lock)
1643 __acquires(this_rq->lock)
1644{
1645 spin_unlock(&this_rq->lock);
1646 double_rq_lock(this_rq, busiest);
1647
1648 return 1;
1649}
1650
1651#else
1652/*
1653 * Unfair double_lock_balance: Optimizes throughput at the expense of
1654 * latency by eliminating extra atomic operations when the locks are
1655 * already in proper order on entry. This favors lower cpu-ids and will
1656 * grant the double lock to lower cpus over higher ids under contention,
1657 * regardless of entry order into the function.
1658 */
1659static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
1617 __releases(this_rq->lock) 1660 __releases(this_rq->lock)
1618 __acquires(busiest->lock) 1661 __acquires(busiest->lock)
1619 __acquires(this_rq->lock) 1662 __acquires(this_rq->lock)
1620{ 1663{
1621 int ret = 0; 1664 int ret = 0;
1622 1665
1623 if (unlikely(!irqs_disabled())) {
1624 /* printk() doesn't work good under rq->lock */
1625 spin_unlock(&this_rq->lock);
1626 BUG_ON(1);
1627 }
1628 if (unlikely(!spin_trylock(&busiest->lock))) { 1666 if (unlikely(!spin_trylock(&busiest->lock))) {
1629 if (busiest < this_rq) { 1667 if (busiest < this_rq) {
1630 spin_unlock(&this_rq->lock); 1668 spin_unlock(&this_rq->lock);
@@ -1637,6 +1675,22 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1637 return ret; 1675 return ret;
1638} 1676}
1639 1677
1678#endif /* CONFIG_PREEMPT */
1679
1680/*
1681 * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
1682 */
1683static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
1684{
1685 if (unlikely(!irqs_disabled())) {
 1686 /* printk() doesn't work well under rq->lock */
1687 spin_unlock(&this_rq->lock);
1688 BUG_ON(1);
1689 }
1690
1691 return _double_lock_balance(this_rq, busiest);
1692}
1693
1640static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest) 1694static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
1641 __releases(busiest->lock) 1695 __releases(busiest->lock)
1642{ 1696{
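Both variants above lean on a total order over runqueue locks: when the trylock on the busiest queue fails, the lower-addressed rq is locked first, which is the classic way to make a double lock deadlock-free. A compact pthread rendition of that ordering rule (struct rq here is a stub, and callers are assumed to pass two distinct queues):

#include <pthread.h>
#include <stdio.h>

struct rq {
	pthread_mutex_t lock;
	int nr_running;
};

/*
 * Lock two distinct runqueues without deadlocking: the lower-addressed
 * lock is always taken first, so two CPUs locking the same pair can
 * never each hold one lock while waiting on the other.
 */
static void double_rq_lock(struct rq *a, struct rq *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void double_rq_unlock(struct rq *a, struct rq *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct rq rq0 = { PTHREAD_MUTEX_INITIALIZER, 3 };
	struct rq rq1 = { PTHREAD_MUTEX_INITIALIZER, 1 };

	double_rq_lock(&rq1, &rq0);	/* argument order is irrelevant */
	printf("queues locked: %d vs %d tasks\n", rq0.nr_running, rq1.nr_running);
	double_rq_unlock(&rq1, &rq0);
	return 0;
}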
@@ -1705,6 +1759,9 @@ static void update_avg(u64 *avg, u64 sample)
1705 1759
1706static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup) 1760static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1707{ 1761{
1762 if (wakeup)
1763 p->se.start_runtime = p->se.sum_exec_runtime;
1764
1708 sched_info_queued(p); 1765 sched_info_queued(p);
1709 p->sched_class->enqueue_task(rq, p, wakeup); 1766 p->sched_class->enqueue_task(rq, p, wakeup);
1710 p->se.on_rq = 1; 1767 p->se.on_rq = 1;
@@ -1712,10 +1769,15 @@ static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
1712 1769
1713static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep) 1770static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
1714{ 1771{
1715 if (sleep && p->se.last_wakeup) { 1772 if (sleep) {
1716 update_avg(&p->se.avg_overlap, 1773 if (p->se.last_wakeup) {
1717 p->se.sum_exec_runtime - p->se.last_wakeup); 1774 update_avg(&p->se.avg_overlap,
1718 p->se.last_wakeup = 0; 1775 p->se.sum_exec_runtime - p->se.last_wakeup);
1776 p->se.last_wakeup = 0;
1777 } else {
1778 update_avg(&p->se.avg_wakeup,
1779 sysctl_sched_wakeup_granularity);
1780 }
1719 } 1781 }
1720 1782
1721 sched_info_dequeued(p); 1783 sched_info_dequeued(p);
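Both branches of the new dequeue path feed update_avg(), which in this sched.c is a cheap exponentially weighted moving average, roughly *avg += (sample - *avg) / 8. A few iterations make the behaviour of avg_overlap/avg_wakeup concrete; the 1/8 weight matches the kernel's shift, the sample values below are made up:

#include <stdio.h>

/* EWMA in the style of sched.c's update_avg(): 1/8 of the error per step. */
static void update_avg(unsigned long long *avg, unsigned long long sample)
{
	long long diff = (long long)sample - (long long)*avg;

	*avg += diff / 8;
}

int main(void)
{
	unsigned long long avg_wakeup = 0;
	unsigned long long samples[] = { 8000, 8000, 8000, 800, 800 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_avg(&avg_wakeup, samples[i]);
		printf("sample %llu -> avg %llu\n", samples[i], avg_wakeup);
	}
	return 0;
}

The average drifts toward recent samples without ever jumping, which is why a task's history of short wakeups or overlaps decays smoothly rather than being overwritten by one outlier.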
@@ -2017,7 +2079,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
2017 * it must be off the runqueue _entirely_, and not 2079 * it must be off the runqueue _entirely_, and not
2018 * preempted! 2080 * preempted!
2019 * 2081 *
2020 * So if it wa still runnable (but just not actively 2082 * So if it was still runnable (but just not actively
2021 * running right now), it's preempted, and we should 2083 * running right now), it's preempted, and we should
2022 * yield - it could be a while. 2084 * yield - it could be a while.
2023 */ 2085 */
@@ -2267,7 +2329,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
2267 sync = 0; 2329 sync = 0;
2268 2330
2269#ifdef CONFIG_SMP 2331#ifdef CONFIG_SMP
2270 if (sched_feat(LB_WAKEUP_UPDATE)) { 2332 if (sched_feat(LB_WAKEUP_UPDATE) && !root_task_group_empty()) {
2271 struct sched_domain *sd; 2333 struct sched_domain *sd;
2272 2334
2273 this_cpu = raw_smp_processor_id(); 2335 this_cpu = raw_smp_processor_id();
@@ -2345,6 +2407,22 @@ out_activate:
2345 activate_task(rq, p, 1); 2407 activate_task(rq, p, 1);
2346 success = 1; 2408 success = 1;
2347 2409
2410 /*
2411 * Only attribute actual wakeups done by this task.
2412 */
2413 if (!in_interrupt()) {
2414 struct sched_entity *se = &current->se;
2415 u64 sample = se->sum_exec_runtime;
2416
2417 if (se->last_wakeup)
2418 sample -= se->last_wakeup;
2419 else
2420 sample -= se->start_runtime;
2421 update_avg(&se->avg_wakeup, sample);
2422
2423 se->last_wakeup = se->sum_exec_runtime;
2424 }
2425
2348out_running: 2426out_running:
2349 trace_sched_wakeup(rq, p, success); 2427 trace_sched_wakeup(rq, p, success);
2350 check_preempt_curr(rq, p, sync); 2428 check_preempt_curr(rq, p, sync);
@@ -2355,8 +2433,6 @@ out_running:
2355 p->sched_class->task_wake_up(rq, p); 2433 p->sched_class->task_wake_up(rq, p);
2356#endif 2434#endif
2357out: 2435out:
2358 current->se.last_wakeup = current->se.sum_exec_runtime;
2359
2360 task_rq_unlock(rq, &flags); 2436 task_rq_unlock(rq, &flags);
2361 2437
2362 return success; 2438 return success;
@@ -2386,6 +2462,8 @@ static void __sched_fork(struct task_struct *p)
2386 p->se.prev_sum_exec_runtime = 0; 2462 p->se.prev_sum_exec_runtime = 0;
2387 p->se.last_wakeup = 0; 2463 p->se.last_wakeup = 0;
2388 p->se.avg_overlap = 0; 2464 p->se.avg_overlap = 0;
2465 p->se.start_runtime = 0;
2466 p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
2389 2467
2390#ifdef CONFIG_SCHEDSTATS 2468#ifdef CONFIG_SCHEDSTATS
2391 p->se.wait_start = 0; 2469 p->se.wait_start = 0;
@@ -2448,6 +2526,8 @@ void sched_fork(struct task_struct *p, int clone_flags)
2448 /* Want to start with kernel preemption disabled. */ 2526 /* Want to start with kernel preemption disabled. */
2449 task_thread_info(p)->preempt_count = 1; 2527 task_thread_info(p)->preempt_count = 1;
2450#endif 2528#endif
2529 plist_node_init(&p->pushable_tasks, MAX_PRIO);
2530
2451 put_cpu(); 2531 put_cpu();
2452} 2532}
2453 2533
@@ -2491,7 +2571,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2491#ifdef CONFIG_PREEMPT_NOTIFIERS 2571#ifdef CONFIG_PREEMPT_NOTIFIERS
2492 2572
2493/** 2573/**
2494 * preempt_notifier_register - tell me when current is being being preempted & rescheduled 2574 * preempt_notifier_register - tell me when current is being preempted & rescheduled
2495 * @notifier: notifier struct to register 2575 * @notifier: notifier struct to register
2496 */ 2576 */
2497void preempt_notifier_register(struct preempt_notifier *notifier) 2577void preempt_notifier_register(struct preempt_notifier *notifier)
@@ -2588,6 +2668,12 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2588{ 2668{
2589 struct mm_struct *mm = rq->prev_mm; 2669 struct mm_struct *mm = rq->prev_mm;
2590 long prev_state; 2670 long prev_state;
2671#ifdef CONFIG_SMP
2672 int post_schedule = 0;
2673
2674 if (current->sched_class->needs_post_schedule)
2675 post_schedule = current->sched_class->needs_post_schedule(rq);
2676#endif
2591 2677
2592 rq->prev_mm = NULL; 2678 rq->prev_mm = NULL;
2593 2679
@@ -2606,7 +2692,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
2606 finish_arch_switch(prev); 2692 finish_arch_switch(prev);
2607 finish_lock_switch(rq, prev); 2693 finish_lock_switch(rq, prev);
2608#ifdef CONFIG_SMP 2694#ifdef CONFIG_SMP
2609 if (current->sched_class->post_schedule) 2695 if (post_schedule)
2610 current->sched_class->post_schedule(rq); 2696 current->sched_class->post_schedule(rq);
2611#endif 2697#endif
2612 2698
@@ -2913,6 +2999,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2913 struct sched_domain *sd, enum cpu_idle_type idle, 2999 struct sched_domain *sd, enum cpu_idle_type idle,
2914 int *all_pinned) 3000 int *all_pinned)
2915{ 3001{
3002 int tsk_cache_hot = 0;
2916 /* 3003 /*
2917 * We do not migrate tasks that are: 3004 * We do not migrate tasks that are:
2918 * 1) running (obviously), or 3005 * 1) running (obviously), or
@@ -2936,10 +3023,11 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2936 * 2) too many balance attempts have failed. 3023 * 2) too many balance attempts have failed.
2937 */ 3024 */
2938 3025
2939 if (!task_hot(p, rq->clock, sd) || 3026 tsk_cache_hot = task_hot(p, rq->clock, sd);
2940 sd->nr_balance_failed > sd->cache_nice_tries) { 3027 if (!tsk_cache_hot ||
3028 sd->nr_balance_failed > sd->cache_nice_tries) {
2941#ifdef CONFIG_SCHEDSTATS 3029#ifdef CONFIG_SCHEDSTATS
2942 if (task_hot(p, rq->clock, sd)) { 3030 if (tsk_cache_hot) {
2943 schedstat_inc(sd, lb_hot_gained[idle]); 3031 schedstat_inc(sd, lb_hot_gained[idle]);
2944 schedstat_inc(p, se.nr_forced_migrations); 3032 schedstat_inc(p, se.nr_forced_migrations);
2945 } 3033 }
@@ -2947,7 +3035,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2947 return 1; 3035 return 1;
2948 } 3036 }
2949 3037
2950 if (task_hot(p, rq->clock, sd)) { 3038 if (tsk_cache_hot) {
2951 schedstat_inc(p, se.nr_failed_migrations_hot); 3039 schedstat_inc(p, se.nr_failed_migrations_hot);
2952 return 0; 3040 return 0;
2953 } 3041 }
@@ -2987,6 +3075,16 @@ next:
2987 pulled++; 3075 pulled++;
2988 rem_load_move -= p->se.load.weight; 3076 rem_load_move -= p->se.load.weight;
2989 3077
3078#ifdef CONFIG_PREEMPT
3079 /*
3080 * NEWIDLE balancing is a source of latency, so preemptible kernels
3081 * will stop after the first task is pulled to minimize the critical
3082 * section.
3083 */
3084 if (idle == CPU_NEWLY_IDLE)
3085 goto out;
3086#endif
3087
2990 /* 3088 /*
2991 * We only want to steal up to the prescribed amount of weighted load. 3089 * We only want to steal up to the prescribed amount of weighted load.
2992 */ 3090 */
@@ -3033,9 +3131,15 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
3033 sd, idle, all_pinned, &this_best_prio); 3131 sd, idle, all_pinned, &this_best_prio);
3034 class = class->next; 3132 class = class->next;
3035 3133
3134#ifdef CONFIG_PREEMPT
3135 /*
3136 * NEWIDLE balancing is a source of latency, so preemptible
3137 * kernels will stop after the first task is pulled to minimize
3138 * the critical section.
3139 */
3036 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running) 3140 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
3037 break; 3141 break;
3038 3142#endif
3039 } while (class && max_load_move > total_load_moved); 3143 } while (class && max_load_move > total_load_moved);
3040 3144
3041 return total_load_moved > 0; 3145 return total_load_moved > 0;
@@ -3085,246 +3189,480 @@ static int move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
3085 3189
3086 return 0; 3190 return 0;
3087} 3191}
3088 3192/********** Helpers for find_busiest_group ************************/
3089/* 3193/*
3090 * find_busiest_group finds and returns the busiest CPU group within the 3194 * sd_lb_stats - Structure to store the statistics of a sched_domain
3091 * domain. It calculates and returns the amount of weighted load which 3195 * during load balancing.
3092 * should be moved to restore balance via the imbalance parameter.
3093 */ 3196 */
3094static struct sched_group * 3197struct sd_lb_stats {
3095find_busiest_group(struct sched_domain *sd, int this_cpu, 3198 struct sched_group *busiest; /* Busiest group in this sd */
3096 unsigned long *imbalance, enum cpu_idle_type idle, 3199 struct sched_group *this; /* Local group in this sd */
3097 int *sd_idle, const struct cpumask *cpus, int *balance) 3200 unsigned long total_load; /* Total load of all groups in sd */
3098{ 3201 unsigned long total_pwr; /* Total power of all groups in sd */
3099 struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups; 3202 unsigned long avg_load; /* Average load across all groups in sd */
3100 unsigned long max_load, avg_load, total_load, this_load, total_pwr; 3203
3101 unsigned long max_pull; 3204 /** Statistics of this group */
3102 unsigned long busiest_load_per_task, busiest_nr_running; 3205 unsigned long this_load;
3103 unsigned long this_load_per_task, this_nr_running; 3206 unsigned long this_load_per_task;
3104 int load_idx, group_imb = 0; 3207 unsigned long this_nr_running;
3208
3209 /* Statistics of the busiest group */
3210 unsigned long max_load;
3211 unsigned long busiest_load_per_task;
3212 unsigned long busiest_nr_running;
3213
 3214 int group_imb; /* Is there an imbalance in this sd? */
3105#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3215#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3106 int power_savings_balance = 1; 3216 int power_savings_balance; /* Is powersave balance needed for this sd */
3107 unsigned long leader_nr_running = 0, min_load_per_task = 0; 3217 struct sched_group *group_min; /* Least loaded group in sd */
3108 unsigned long min_nr_running = ULONG_MAX; 3218 struct sched_group *group_leader; /* Group which relieves group_min */
3109 struct sched_group *group_min = NULL, *group_leader = NULL; 3219 unsigned long min_load_per_task; /* load_per_task in group_min */
3220 unsigned long leader_nr_running; /* Nr running of group_leader */
3221 unsigned long min_nr_running; /* Nr running of group_min */
3110#endif 3222#endif
3223};
3224
3225/*
3226 * sg_lb_stats - stats of a sched_group required for load_balancing
3227 */
3228struct sg_lb_stats {
 3229 unsigned long avg_load; /* Avg load across the CPUs of the group */
3230 unsigned long group_load; /* Total load over the CPUs of the group */
3231 unsigned long sum_nr_running; /* Nr tasks running in the group */
3232 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
3233 unsigned long group_capacity;
 3234 int group_imb; /* Is there an imbalance in the group? */
3235};
3111 3236
3112 max_load = this_load = total_load = total_pwr = 0; 3237/**
3113 busiest_load_per_task = busiest_nr_running = 0; 3238 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
3114 this_load_per_task = this_nr_running = 0; 3239 * @group: The group whose first cpu is to be returned.
3240 */
3241static inline unsigned int group_first_cpu(struct sched_group *group)
3242{
3243 return cpumask_first(sched_group_cpus(group));
3244}
3115 3245
3116 if (idle == CPU_NOT_IDLE) 3246/**
3247 * get_sd_load_idx - Obtain the load index for a given sched domain.
3248 * @sd: The sched_domain whose load_idx is to be obtained.
 3249 * @idle: The idle status of the CPU whose sd's load_idx is obtained.
3250 */
3251static inline int get_sd_load_idx(struct sched_domain *sd,
3252 enum cpu_idle_type idle)
3253{
3254 int load_idx;
3255
3256 switch (idle) {
3257 case CPU_NOT_IDLE:
3117 load_idx = sd->busy_idx; 3258 load_idx = sd->busy_idx;
3118 else if (idle == CPU_NEWLY_IDLE) 3259 break;
3260
3261 case CPU_NEWLY_IDLE:
3119 load_idx = sd->newidle_idx; 3262 load_idx = sd->newidle_idx;
3120 else 3263 break;
3264 default:
3121 load_idx = sd->idle_idx; 3265 load_idx = sd->idle_idx;
3266 break;
3267 }
3122 3268
3123 do { 3269 return load_idx;
3124 unsigned long load, group_capacity, max_cpu_load, min_cpu_load; 3270}
3125 int local_group;
3126 int i;
3127 int __group_imb = 0;
3128 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3129 unsigned long sum_nr_running, sum_weighted_load;
3130 unsigned long sum_avg_load_per_task;
3131 unsigned long avg_load_per_task;
3132 3271
3133 local_group = cpumask_test_cpu(this_cpu,
3134 sched_group_cpus(group));
3135 3272
3136 if (local_group) 3273#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3137 balance_cpu = cpumask_first(sched_group_cpus(group)); 3274/**
3275 * init_sd_power_savings_stats - Initialize power savings statistics for
3276 * the given sched_domain, during load balancing.
3277 *
3278 * @sd: Sched domain whose power-savings statistics are to be initialized.
3279 * @sds: Variable containing the statistics for sd.
3280 * @idle: Idle status of the CPU at which we're performing load-balancing.
3281 */
3282static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3283 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3284{
3285 /*
3286 * Busy processors will not participate in power savings
3287 * balance.
3288 */
3289 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3290 sds->power_savings_balance = 0;
3291 else {
3292 sds->power_savings_balance = 1;
3293 sds->min_nr_running = ULONG_MAX;
3294 sds->leader_nr_running = 0;
3295 }
3296}
3138 3297
3139 /* Tally up the load of all CPUs in the group */ 3298/**
3140 sum_weighted_load = sum_nr_running = avg_load = 0; 3299 * update_sd_power_savings_stats - Update the power saving stats for a
3141 sum_avg_load_per_task = avg_load_per_task = 0; 3300 * sched_domain while performing load balancing.
3301 *
3302 * @group: sched_group belonging to the sched_domain under consideration.
3303 * @sds: Variable containing the statistics of the sched_domain
3304 * @local_group: Does group contain the CPU for which we're performing
 3305 * load balancing?
3306 * @sgs: Variable containing the statistics of the group.
3307 */
3308static inline void update_sd_power_savings_stats(struct sched_group *group,
3309 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3310{
3142 3311
3143 max_cpu_load = 0; 3312 if (!sds->power_savings_balance)
3144 min_cpu_load = ~0UL; 3313 return;
3145 3314
3146 for_each_cpu_and(i, sched_group_cpus(group), cpus) { 3315 /*
3147 struct rq *rq = cpu_rq(i); 3316 * If the local group is idle or completely loaded
3317 * no need to do power savings balance at this domain
3318 */
3319 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
3320 !sds->this_nr_running))
3321 sds->power_savings_balance = 0;
3148 3322
3149 if (*sd_idle && rq->nr_running) 3323 /*
3150 *sd_idle = 0; 3324 * If a group is already running at full capacity or idle,
3325 * don't include that group in power savings calculations
3326 */
3327 if (!sds->power_savings_balance ||
3328 sgs->sum_nr_running >= sgs->group_capacity ||
3329 !sgs->sum_nr_running)
3330 return;
3151 3331
3152 /* Bias balancing toward cpus of our domain */ 3332 /*
3153 if (local_group) { 3333 * Calculate the group which has the least non-idle load.
3154 if (idle_cpu(i) && !first_idle_cpu) { 3334 * This is the group from where we need to pick up the load
3155 first_idle_cpu = 1; 3335 * for saving power
3156 balance_cpu = i; 3336 */
3157 } 3337 if ((sgs->sum_nr_running < sds->min_nr_running) ||
3338 (sgs->sum_nr_running == sds->min_nr_running &&
3339 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
3340 sds->group_min = group;
3341 sds->min_nr_running = sgs->sum_nr_running;
3342 sds->min_load_per_task = sgs->sum_weighted_load /
3343 sgs->sum_nr_running;
3344 }
3158 3345
3159 load = target_load(i, load_idx); 3346 /*
 3160 } else { 3347 * Calculate the group which is almost at its
 3161 load = source_load(i, load_idx); 3348 * capacity but still has some space to pick up some load
 3162 if (load > max_cpu_load) 3349 * from other groups and save more power
3163 max_cpu_load = load; 3350 */
3164 if (min_cpu_load > load) 3351 if (sgs->sum_nr_running > sgs->group_capacity - 1)
3165 min_cpu_load = load; 3352 return;
3166 }
3167 3353
3168 avg_load += load; 3354 if (sgs->sum_nr_running > sds->leader_nr_running ||
3169 sum_nr_running += rq->nr_running; 3355 (sgs->sum_nr_running == sds->leader_nr_running &&
3170 sum_weighted_load += weighted_cpuload(i); 3356 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
3357 sds->group_leader = group;
3358 sds->leader_nr_running = sgs->sum_nr_running;
3359 }
3360}
3171 3361
3172 sum_avg_load_per_task += cpu_avg_load_per_task(i); 3362/**
3173 } 3363 * check_power_save_busiest_group - see if there is potential for some power-savings balance
3364 * @sds: Variable containing the statistics of the sched_domain
3365 * under consideration.
3366 * @this_cpu: Cpu at which we're currently performing load-balancing.
3367 * @imbalance: Variable to store the imbalance.
3368 *
3369 * Description:
3370 * Check if we have potential to perform some power-savings balance.
3371 * If yes, set the busiest group to be the least loaded group in the
 3372 * sched_domain, so that its CPUs can be put to idle.
3373 *
3374 * Returns 1 if there is potential to perform power-savings balance.
3375 * Else returns 0.
3376 */
3377static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3378 int this_cpu, unsigned long *imbalance)
3379{
3380 if (!sds->power_savings_balance)
3381 return 0;
3174 3382
3175 /* 3383 if (sds->this != sds->group_leader ||
3176 * First idle cpu or the first cpu(busiest) in this sched group 3384 sds->group_leader == sds->group_min)
3177 * is eligible for doing load balancing at this and above 3385 return 0;
3178 * domains. In the newly idle case, we will allow all the cpu's
3179 * to do the newly idle load balance.
3180 */
3181 if (idle != CPU_NEWLY_IDLE && local_group &&
3182 balance_cpu != this_cpu && balance) {
3183 *balance = 0;
3184 goto ret;
3185 }
3186 3386
3187 total_load += avg_load; 3387 *imbalance = sds->min_load_per_task;
3188 total_pwr += group->__cpu_power; 3388 sds->busiest = sds->group_min;
3189 3389
3190 /* Adjust by relative CPU power of the group */ 3390 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) {
3191 avg_load = sg_div_cpu_power(group, 3391 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu =
3192 avg_load * SCHED_LOAD_SCALE); 3392 group_first_cpu(sds->group_leader);
3393 }
3193 3394
3395 return 1;
3194 3396
3195 /* 3397}
3196 * Consider the group unbalanced when the imbalance is larger 3398#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3197 * than the average weight of two tasks. 3399static inline void init_sd_power_savings_stats(struct sched_domain *sd,
3198 * 3400 struct sd_lb_stats *sds, enum cpu_idle_type idle)
3199 * APZ: with cgroup the avg task weight can vary wildly and 3401{
3200 * might not be a suitable number - should we keep a 3402 return;
3201 * normalized nr_running number somewhere that negates 3403}
3202 * the hierarchy? 3404
3203 */ 3405static inline void update_sd_power_savings_stats(struct sched_group *group,
3204 avg_load_per_task = sg_div_cpu_power(group, 3406 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
3205 sum_avg_load_per_task * SCHED_LOAD_SCALE); 3407{
3408 return;
3409}
3410
3411static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
3412 int this_cpu, unsigned long *imbalance)
3413{
3414 return 0;
3415}
3416#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
3417
3418
3419/**
3420 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3421 * @group: sched_group whose statistics are to be updated.
3422 * @this_cpu: Cpu for which load balance is currently performed.
3423 * @idle: Idle status of this_cpu
3424 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3425 * @sd_idle: Idle status of the sched_domain containing group.
3426 * @local_group: Does group contain this_cpu.
3427 * @cpus: Set of cpus considered for load balancing.
3428 * @balance: Should we balance.
3429 * @sgs: variable to hold the statistics for this group.
3430 */
3431static inline void update_sg_lb_stats(struct sched_group *group, int this_cpu,
3432 enum cpu_idle_type idle, int load_idx, int *sd_idle,
3433 int local_group, const struct cpumask *cpus,
3434 int *balance, struct sg_lb_stats *sgs)
3435{
3436 unsigned long load, max_cpu_load, min_cpu_load;
3437 int i;
3438 unsigned int balance_cpu = -1, first_idle_cpu = 0;
3439 unsigned long sum_avg_load_per_task;
3440 unsigned long avg_load_per_task;
3206 3441
3207 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task) 3442 if (local_group)
3208 __group_imb = 1; 3443 balance_cpu = group_first_cpu(group);
3209 3444
3210 group_capacity = group->__cpu_power / SCHED_LOAD_SCALE; 3445 /* Tally up the load of all CPUs in the group */
3446 sum_avg_load_per_task = avg_load_per_task = 0;
3447 max_cpu_load = 0;
3448 min_cpu_load = ~0UL;
3211 3449
3450 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
3451 struct rq *rq = cpu_rq(i);
3452
3453 if (*sd_idle && rq->nr_running)
3454 *sd_idle = 0;
3455
3456 /* Bias balancing toward cpus of our domain */
3212 if (local_group) { 3457 if (local_group) {
3213 this_load = avg_load; 3458 if (idle_cpu(i) && !first_idle_cpu) {
3214 this = group; 3459 first_idle_cpu = 1;
3215 this_nr_running = sum_nr_running; 3460 balance_cpu = i;
3216 this_load_per_task = sum_weighted_load; 3461 }
3217 } else if (avg_load > max_load && 3462
3218 (sum_nr_running > group_capacity || __group_imb)) { 3463 load = target_load(i, load_idx);
3219 max_load = avg_load; 3464 } else {
3220 busiest = group; 3465 load = source_load(i, load_idx);
3221 busiest_nr_running = sum_nr_running; 3466 if (load > max_cpu_load)
3222 busiest_load_per_task = sum_weighted_load; 3467 max_cpu_load = load;
3223 group_imb = __group_imb; 3468 if (min_cpu_load > load)
3469 min_cpu_load = load;
3224 } 3470 }
3225 3471
3226#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3472 sgs->group_load += load;
3227 /* 3473 sgs->sum_nr_running += rq->nr_running;
3228 * Busy processors will not participate in power savings 3474 sgs->sum_weighted_load += weighted_cpuload(i);
3229 * balance.
3230 */
3231 if (idle == CPU_NOT_IDLE ||
3232 !(sd->flags & SD_POWERSAVINGS_BALANCE))
3233 goto group_next;
3234 3475
3235 /* 3476 sum_avg_load_per_task += cpu_avg_load_per_task(i);
3236 * If the local group is idle or completely loaded 3477 }
3237 * no need to do power savings balance at this domain
3238 */
3239 if (local_group && (this_nr_running >= group_capacity ||
3240 !this_nr_running))
3241 power_savings_balance = 0;
3242 3478
3243 /* 3479 /*
3244 * If a group is already running at full capacity or idle, 3480 * First idle cpu or the first cpu(busiest) in this sched group
3245 * don't include that group in power savings calculations 3481 * is eligible for doing load balancing at this and above
3246 */ 3482 * domains. In the newly idle case, we will allow all the cpu's
3247 if (!power_savings_balance || sum_nr_running >= group_capacity 3483 * to do the newly idle load balance.
3248 || !sum_nr_running) 3484 */
3249 goto group_next; 3485 if (idle != CPU_NEWLY_IDLE && local_group &&
3486 balance_cpu != this_cpu && balance) {
3487 *balance = 0;
3488 return;
3489 }
3250 3490
3251 /* 3491 /* Adjust by relative CPU power of the group */
3252 * Calculate the group which has the least non-idle load. 3492 sgs->avg_load = sg_div_cpu_power(group,
3253 * This is the group from where we need to pick up the load 3493 sgs->group_load * SCHED_LOAD_SCALE);
3254 * for saving power
3255 */
3256 if ((sum_nr_running < min_nr_running) ||
3257 (sum_nr_running == min_nr_running &&
3258 cpumask_first(sched_group_cpus(group)) >
3259 cpumask_first(sched_group_cpus(group_min)))) {
3260 group_min = group;
3261 min_nr_running = sum_nr_running;
3262 min_load_per_task = sum_weighted_load /
3263 sum_nr_running;
3264 }
3265 3494
3266 /* 3495
3267 * Calculate the group which is almost near its 3496 /*
3268 * capacity but still has some space to pick up some load 3497 * Consider the group unbalanced when the imbalance is larger
3269 * from other group and save more power 3498 * than the average weight of two tasks.
3270 */ 3499 *
3271 if (sum_nr_running <= group_capacity - 1) { 3500 * APZ: with cgroup the avg task weight can vary wildly and
3272 if (sum_nr_running > leader_nr_running || 3501 * might not be a suitable number - should we keep a
3273 (sum_nr_running == leader_nr_running && 3502 * normalized nr_running number somewhere that negates
3274 cpumask_first(sched_group_cpus(group)) < 3503 * the hierarchy?
3275 cpumask_first(sched_group_cpus(group_leader)))) { 3504 */
3276 group_leader = group; 3505 avg_load_per_task = sg_div_cpu_power(group,
3277 leader_nr_running = sum_nr_running; 3506 sum_avg_load_per_task * SCHED_LOAD_SCALE);
3278 } 3507
3508 if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
3509 sgs->group_imb = 1;
3510
3511 sgs->group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
3512
3513}
3514
3515/**
 3516 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3517 * @sd: sched_domain whose statistics are to be updated.
3518 * @this_cpu: Cpu for which load balance is currently performed.
3519 * @idle: Idle status of this_cpu
3520 * @sd_idle: Idle status of the sched_domain containing group.
3521 * @cpus: Set of cpus considered for load balancing.
3522 * @balance: Should we balance.
3523 * @sds: variable to hold the statistics for this sched_domain.
3524 */
3525static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
3526 enum cpu_idle_type idle, int *sd_idle,
3527 const struct cpumask *cpus, int *balance,
3528 struct sd_lb_stats *sds)
3529{
3530 struct sched_group *group = sd->groups;
3531 struct sg_lb_stats sgs;
3532 int load_idx;
3533
3534 init_sd_power_savings_stats(sd, sds, idle);
3535 load_idx = get_sd_load_idx(sd, idle);
3536
3537 do {
3538 int local_group;
3539
3540 local_group = cpumask_test_cpu(this_cpu,
3541 sched_group_cpus(group));
3542 memset(&sgs, 0, sizeof(sgs));
3543 update_sg_lb_stats(group, this_cpu, idle, load_idx, sd_idle,
3544 local_group, cpus, balance, &sgs);
3545
3546 if (local_group && balance && !(*balance))
3547 return;
3548
3549 sds->total_load += sgs.group_load;
3550 sds->total_pwr += group->__cpu_power;
3551
3552 if (local_group) {
3553 sds->this_load = sgs.avg_load;
3554 sds->this = group;
3555 sds->this_nr_running = sgs.sum_nr_running;
3556 sds->this_load_per_task = sgs.sum_weighted_load;
3557 } else if (sgs.avg_load > sds->max_load &&
3558 (sgs.sum_nr_running > sgs.group_capacity ||
3559 sgs.group_imb)) {
3560 sds->max_load = sgs.avg_load;
3561 sds->busiest = group;
3562 sds->busiest_nr_running = sgs.sum_nr_running;
3563 sds->busiest_load_per_task = sgs.sum_weighted_load;
3564 sds->group_imb = sgs.group_imb;
3279 } 3565 }
3280group_next: 3566
3281#endif 3567 update_sd_power_savings_stats(group, sds, local_group, &sgs);
3282 group = group->next; 3568 group = group->next;
3283 } while (group != sd->groups); 3569 } while (group != sd->groups);
3284 3570
3285 if (!busiest || this_load >= max_load || busiest_nr_running == 0) 3571}
3286 goto out_balanced;
3287
3288 avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
3289 3572
3290 if (this_load >= avg_load || 3573/**
3291 100*max_load <= sd->imbalance_pct*this_load) 3574 * fix_small_imbalance - Calculate the minor imbalance that exists
3292 goto out_balanced; 3575 * amongst the groups of a sched_domain, during
3576 * load balancing.
3577 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3578 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3579 * @imbalance: Variable to store the imbalance.
3580 */
3581static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3582 int this_cpu, unsigned long *imbalance)
3583{
3584 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3585 unsigned int imbn = 2;
3586
3587 if (sds->this_nr_running) {
3588 sds->this_load_per_task /= sds->this_nr_running;
3589 if (sds->busiest_load_per_task >
3590 sds->this_load_per_task)
3591 imbn = 1;
3592 } else
3593 sds->this_load_per_task =
3594 cpu_avg_load_per_task(this_cpu);
3293 3595
3294 busiest_load_per_task /= busiest_nr_running; 3596 if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
3295 if (group_imb) 3597 sds->busiest_load_per_task * imbn) {
3296 busiest_load_per_task = min(busiest_load_per_task, avg_load); 3598 *imbalance = sds->busiest_load_per_task;
3599 return;
3600 }
3297 3601
3298 /* 3602 /*
3299 * We're trying to get all the cpus to the average_load, so we don't 3603 * OK, we don't have enough imbalance to justify moving tasks,
3300 * want to push ourselves above the average load, nor do we wish to 3604 * however we may be able to increase total CPU power used by
3301 * reduce the max loaded cpu below the average load, as either of these 3605 * moving them.
3302 * actions would just result in more rebalancing later, and ping-pong
3303 * tasks around. Thus we look for the minimum possible imbalance.
3304 * Negative imbalances (*we* are more loaded than anyone else) will
3305 * be counted as no imbalance for these purposes -- we can't fix that
3306 * by pulling tasks to us. Be careful of negative numbers as they'll
3307 * appear as very large values with unsigned longs.
3308 */ 3606 */
3309 if (max_load <= busiest_load_per_task)
3310 goto out_balanced;
3311 3607
3608 pwr_now += sds->busiest->__cpu_power *
3609 min(sds->busiest_load_per_task, sds->max_load);
3610 pwr_now += sds->this->__cpu_power *
3611 min(sds->this_load_per_task, sds->this_load);
3612 pwr_now /= SCHED_LOAD_SCALE;
3613
3614 /* Amount of load we'd subtract */
3615 tmp = sg_div_cpu_power(sds->busiest,
3616 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3617 if (sds->max_load > tmp)
3618 pwr_move += sds->busiest->__cpu_power *
3619 min(sds->busiest_load_per_task, sds->max_load - tmp);
3620
3621 /* Amount of load we'd add */
3622 if (sds->max_load * sds->busiest->__cpu_power <
3623 sds->busiest_load_per_task * SCHED_LOAD_SCALE)
3624 tmp = sg_div_cpu_power(sds->this,
3625 sds->max_load * sds->busiest->__cpu_power);
3626 else
3627 tmp = sg_div_cpu_power(sds->this,
3628 sds->busiest_load_per_task * SCHED_LOAD_SCALE);
3629 pwr_move += sds->this->__cpu_power *
3630 min(sds->this_load_per_task, sds->this_load + tmp);
3631 pwr_move /= SCHED_LOAD_SCALE;
3632
3633 /* Move if we gain throughput */
3634 if (pwr_move > pwr_now)
3635 *imbalance = sds->busiest_load_per_task;
3636}
3637
3638/**
3639 * calculate_imbalance - Calculate the amount of imbalance present within the
3640 * groups of a given sched_domain during load balance.
3641 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3642 * @this_cpu: Cpu for which currently load balance is being performed.
3643 * @imbalance: The variable to store the imbalance.
3644 */
3645static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3646 unsigned long *imbalance)
3647{
3648 unsigned long max_pull;
3312 /* 3649 /*
3313 * In the presence of smp nice balancing, certain scenarios can have 3650 * In the presence of smp nice balancing, certain scenarios can have
3314 * max load less than avg load(as we skip the groups at or below 3651 * max load less than avg load(as we skip the groups at or below
3315 * its cpu_power, while calculating max_load..) 3652 * its cpu_power, while calculating max_load..)
3316 */ 3653 */
3317 if (max_load < avg_load) { 3654 if (sds->max_load < sds->avg_load) {
3318 *imbalance = 0; 3655 *imbalance = 0;
3319 goto small_imbalance; 3656 return fix_small_imbalance(sds, this_cpu, imbalance);
3320 } 3657 }
3321 3658
3322 /* Don't want to pull so many tasks that a group would go idle */ 3659 /* Don't want to pull so many tasks that a group would go idle */
3323 max_pull = min(max_load - avg_load, max_load - busiest_load_per_task); 3660 max_pull = min(sds->max_load - sds->avg_load,
3661 sds->max_load - sds->busiest_load_per_task);
3324 3662
3325 /* How much load to actually move to equalise the imbalance */ 3663 /* How much load to actually move to equalise the imbalance */
3326 *imbalance = min(max_pull * busiest->__cpu_power, 3664 *imbalance = min(max_pull * sds->busiest->__cpu_power,
3327 (avg_load - this_load) * this->__cpu_power) 3665 (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
3328 / SCHED_LOAD_SCALE; 3666 / SCHED_LOAD_SCALE;
3329 3667
3330 /* 3668 /*
@@ -3333,78 +3671,110 @@ group_next:
3333 * a think about bumping its value to force at least one task to be 3671 * a think about bumping its value to force at least one task to be
3334 * moved 3672 * moved
3335 */ 3673 */
3336 if (*imbalance < busiest_load_per_task) { 3674 if (*imbalance < sds->busiest_load_per_task)
3337 unsigned long tmp, pwr_now, pwr_move; 3675 return fix_small_imbalance(sds, this_cpu, imbalance);
3338 unsigned int imbn;
3339
3340small_imbalance:
3341 pwr_move = pwr_now = 0;
3342 imbn = 2;
3343 if (this_nr_running) {
3344 this_load_per_task /= this_nr_running;
3345 if (busiest_load_per_task > this_load_per_task)
3346 imbn = 1;
3347 } else
3348 this_load_per_task = cpu_avg_load_per_task(this_cpu);
3349 3676
3350 if (max_load - this_load + busiest_load_per_task >= 3677}
3351 busiest_load_per_task * imbn) { 3678/******* find_busiest_group() helpers end here *********************/
3352 *imbalance = busiest_load_per_task;
3353 return busiest;
3354 }
3355 3679
3356 /* 3680/**
3357 * OK, we don't have enough imbalance to justify moving tasks, 3681 * find_busiest_group - Returns the busiest group within the sched_domain
3358 * however we may be able to increase total CPU power used by 3682 * if there is an imbalance. If there isn't an imbalance, and
3359 * moving them. 3683 * the user has opted for power-savings, it returns a group whose
3360 */ 3684 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3685 * such a group exists.
3686 *
3687 * Also calculates the amount of weighted load which should be moved
3688 * to restore balance.
3689 *
3690 * @sd: The sched_domain whose busiest group is to be returned.
3691 * @this_cpu: The cpu for which load balancing is currently being performed.
3692 * @imbalance: Variable which stores amount of weighted load which should
3693 * be moved to restore balance/put a group to idle.
3694 * @idle: The idle status of this_cpu.
3695 * @sd_idle: The idleness of sd
3696 * @cpus: The set of CPUs under consideration for load-balancing.
3697 * @balance: Pointer to a variable indicating if this_cpu
3698 * is the appropriate cpu to perform load balancing at this_level.
3699 *
3700 * Returns: - the busiest group if imbalance exists.
3701 * - If no imbalance and user has opted for power-savings balance,
3702 * return the least loaded group whose CPUs can be
3703 * put to idle by rebalancing its tasks onto our group.
3704 */
3705static struct sched_group *
3706find_busiest_group(struct sched_domain *sd, int this_cpu,
3707 unsigned long *imbalance, enum cpu_idle_type idle,
3708 int *sd_idle, const struct cpumask *cpus, int *balance)
3709{
3710 struct sd_lb_stats sds;
3361 3711
3362 pwr_now += busiest->__cpu_power * 3712 memset(&sds, 0, sizeof(sds));
3363 min(busiest_load_per_task, max_load);
3364 pwr_now += this->__cpu_power *
3365 min(this_load_per_task, this_load);
3366 pwr_now /= SCHED_LOAD_SCALE;
3367
3368 /* Amount of load we'd subtract */
3369 tmp = sg_div_cpu_power(busiest,
3370 busiest_load_per_task * SCHED_LOAD_SCALE);
3371 if (max_load > tmp)
3372 pwr_move += busiest->__cpu_power *
3373 min(busiest_load_per_task, max_load - tmp);
3374
3375 /* Amount of load we'd add */
3376 if (max_load * busiest->__cpu_power <
3377 busiest_load_per_task * SCHED_LOAD_SCALE)
3378 tmp = sg_div_cpu_power(this,
3379 max_load * busiest->__cpu_power);
3380 else
3381 tmp = sg_div_cpu_power(this,
3382 busiest_load_per_task * SCHED_LOAD_SCALE);
3383 pwr_move += this->__cpu_power *
3384 min(this_load_per_task, this_load + tmp);
3385 pwr_move /= SCHED_LOAD_SCALE;
3386 3713
3387 /* Move if we gain throughput */ 3714 /*
3388 if (pwr_move > pwr_now) 3715 * Compute the various statistics relavent for load balancing at
3389 *imbalance = busiest_load_per_task; 3716 * this level.
3390 } 3717 */
3718 update_sd_lb_stats(sd, this_cpu, idle, sd_idle, cpus,
3719 balance, &sds);
3720
3721 /* Cases where imbalance does not exist from POV of this_cpu */
3722 /* 1) this_cpu is not the appropriate cpu to perform load balancing
3723 * at this level.
3724 * 2) There is no busy sibling group to pull from.
3725 * 3) This group is the busiest group.
 3726 * 4) This group is busier than the avg busyness at this
3727 * sched_domain.
3728 * 5) The imbalance is within the specified limit.
3729 * 6) Any rebalance would lead to ping-pong
3730 */
3731 if (balance && !(*balance))
3732 goto ret;
3391 3733
3392 return busiest; 3734 if (!sds.busiest || sds.busiest_nr_running == 0)
3735 goto out_balanced;
3393 3736
3394out_balanced: 3737 if (sds.this_load >= sds.max_load)
3395#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) 3738 goto out_balanced;
3396 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
3397 goto ret;
3398 3739
3399 if (this == group_leader && group_leader != group_min) { 3740 sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
3400 *imbalance = min_load_per_task; 3741
3401 if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP) { 3742 if (sds.this_load >= sds.avg_load)
3402 cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu = 3743 goto out_balanced;
3403 cpumask_first(sched_group_cpus(group_leader)); 3744
3404 } 3745 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3405 return group_min; 3746 goto out_balanced;
3406 } 3747
3407#endif 3748 sds.busiest_load_per_task /= sds.busiest_nr_running;
3749 if (sds.group_imb)
3750 sds.busiest_load_per_task =
3751 min(sds.busiest_load_per_task, sds.avg_load);
3752
3753 /*
3754 * We're trying to get all the cpus to the average_load, so we don't
3755 * want to push ourselves above the average load, nor do we wish to
3756 * reduce the max loaded cpu below the average load, as either of these
3757 * actions would just result in more rebalancing later, and ping-pong
3758 * tasks around. Thus we look for the minimum possible imbalance.
3759 * Negative imbalances (*we* are more loaded than anyone else) will
3760 * be counted as no imbalance for these purposes -- we can't fix that
3761 * by pulling tasks to us. Be careful of negative numbers as they'll
3762 * appear as very large values with unsigned longs.
3763 */
3764 if (sds.max_load <= sds.busiest_load_per_task)
3765 goto out_balanced;
3766
3767 /* Looks like there is an imbalance. Compute it */
3768 calculate_imbalance(&sds, this_cpu, imbalance);
3769 return sds.busiest;
3770
3771out_balanced:
3772 /*
3773 * There is no obvious imbalance. But check if we can do some balancing
3774 * to save power.
3775 */
3776 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3777 return sds.busiest;
3408ret: 3778ret:
3409 *imbalance = 0; 3779 *imbalance = 0;
3410 return NULL; 3780 return NULL;
@@ -4057,6 +4427,11 @@ static void run_rebalance_domains(struct softirq_action *h)
4057#endif 4427#endif
4058} 4428}
4059 4429
4430static inline int on_null_domain(int cpu)
4431{
4432 return !rcu_dereference(cpu_rq(cpu)->sd);
4433}
4434
4060/* 4435/*
4061 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing. 4436 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4062 * 4437 *
@@ -4114,7 +4489,9 @@ static inline void trigger_load_balance(struct rq *rq, int cpu)
4114 cpumask_test_cpu(cpu, nohz.cpu_mask)) 4489 cpumask_test_cpu(cpu, nohz.cpu_mask))
4115 return; 4490 return;
4116#endif 4491#endif
4117 if (time_after_eq(jiffies, rq->next_balance)) 4492 /* Don't need to rebalance while attached to NULL domain */
4493 if (time_after_eq(jiffies, rq->next_balance) &&
4494 likely(!on_null_domain(cpu)))
4118 raise_softirq(SCHED_SOFTIRQ); 4495 raise_softirq(SCHED_SOFTIRQ);
4119} 4496}
4120 4497
@@ -4508,11 +4885,33 @@ static inline void schedule_debug(struct task_struct *prev)
4508#endif 4885#endif
4509} 4886}
4510 4887
4888static void put_prev_task(struct rq *rq, struct task_struct *prev)
4889{
4890 if (prev->state == TASK_RUNNING) {
4891 u64 runtime = prev->se.sum_exec_runtime;
4892
4893 runtime -= prev->se.prev_sum_exec_runtime;
4894 runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
4895
4896 /*
4897 * In order to avoid avg_overlap growing stale when we are
4898 * indeed overlapping and hence not getting put to sleep, grow
4899 * the avg_overlap on preemption.
4900 *
4901 * We use the average preemption runtime because that
4902 * correlates to the amount of cache footprint a task can
4903 * build up.
4904 */
4905 update_avg(&prev->se.avg_overlap, runtime);
4906 }
4907 prev->sched_class->put_prev_task(rq, prev);
4908}
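
update_avg() is a small helper defined earlier in sched.c; as far as I can tell it keeps a 1/8-weighted running mean, so avg_overlap adapts to recent preemption behaviour without storing any history. A standalone sketch, assuming that 1/8 weighting:

#include <stdio.h>
#include <stdint.h>

/* mirrors sched.c's update_avg(): avg += (sample - avg) / 8 */
static void update_avg(uint64_t *avg, uint64_t sample)
{
	int64_t diff = (int64_t)(sample - *avg);

	*avg += diff >> 3;	/* arithmetic shift, as in the kernel */
}

int main(void)
{
	uint64_t avg = 0;
	uint64_t samples[] = { 800, 800, 800, 100 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		update_avg(&avg, samples[i]);
		printf("sample=%4llu avg=%4llu\n",
		       (unsigned long long)samples[i], (unsigned long long)avg);
	}
	return 0;
}
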
4909
4511/* 4910/*
4512 * Pick up the highest-prio task: 4911 * Pick up the highest-prio task:
4513 */ 4912 */
4514static inline struct task_struct * 4913static inline struct task_struct *
4515pick_next_task(struct rq *rq, struct task_struct *prev) 4914pick_next_task(struct rq *rq)
4516{ 4915{
4517 const struct sched_class *class; 4916 const struct sched_class *class;
4518 struct task_struct *p; 4917 struct task_struct *p;
@@ -4543,15 +4942,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev)
4543/* 4942/*
4544 * schedule() is the main scheduler function. 4943 * schedule() is the main scheduler function.
4545 */ 4944 */
4546asmlinkage void __sched schedule(void) 4945asmlinkage void __sched __schedule(void)
4547{ 4946{
4548 struct task_struct *prev, *next; 4947 struct task_struct *prev, *next;
4549 unsigned long *switch_count; 4948 unsigned long *switch_count;
4550 struct rq *rq; 4949 struct rq *rq;
4551 int cpu; 4950 int cpu;
4552 4951
4553need_resched:
4554 preempt_disable();
4555 cpu = smp_processor_id(); 4952 cpu = smp_processor_id();
4556 rq = cpu_rq(cpu); 4953 rq = cpu_rq(cpu);
4557 rcu_qsctr_inc(cpu); 4954 rcu_qsctr_inc(cpu);
@@ -4586,8 +4983,8 @@ need_resched_nonpreemptible:
4586 if (unlikely(!rq->nr_running)) 4983 if (unlikely(!rq->nr_running))
4587 idle_balance(cpu, rq); 4984 idle_balance(cpu, rq);
4588 4985
4589 prev->sched_class->put_prev_task(rq, prev); 4986 put_prev_task(rq, prev);
4590 next = pick_next_task(rq, prev); 4987 next = pick_next_task(rq);
4591 4988
4592 if (likely(prev != next)) { 4989 if (likely(prev != next)) {
4593 sched_info_switch(prev, next); 4990 sched_info_switch(prev, next);
@@ -4608,13 +5005,80 @@ need_resched_nonpreemptible:
4608 5005
4609 if (unlikely(reacquire_kernel_lock(current) < 0)) 5006 if (unlikely(reacquire_kernel_lock(current) < 0))
4610 goto need_resched_nonpreemptible; 5007 goto need_resched_nonpreemptible;
5008}
4611 5009
5010asmlinkage void __sched schedule(void)
5011{
5012need_resched:
5013 preempt_disable();
5014 __schedule();
4612 preempt_enable_no_resched(); 5015 preempt_enable_no_resched();
4613 if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) 5016 if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
4614 goto need_resched; 5017 goto need_resched;
4615} 5018}
4616EXPORT_SYMBOL(schedule); 5019EXPORT_SYMBOL(schedule);
4617 5020
5021#ifdef CONFIG_SMP
5022/*
5023 * Look out! "owner" is an entirely speculative pointer
5024 * access and not reliable.
5025 */
5026int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
5027{
5028 unsigned int cpu;
5029 struct rq *rq;
5030
5031 if (!sched_feat(OWNER_SPIN))
5032 return 0;
5033
5034#ifdef CONFIG_DEBUG_PAGEALLOC
5035 /*
5036 * Need to access the cpu field knowing that
5037 * DEBUG_PAGEALLOC could have unmapped it if
5038 * the mutex owner just released it and exited.
5039 */
5040 if (probe_kernel_address(&owner->cpu, cpu))
5041 goto out;
5042#else
5043 cpu = owner->cpu;
5044#endif
5045
5046 /*
5047 * Even if the access succeeded (likely case),
5048 * the cpu field may no longer be valid.
5049 */
5050 if (cpu >= nr_cpumask_bits)
5051 goto out;
5052
5053 /*
5054 * We need to validate that we can do a
5055 * get_cpu() and that we have the percpu area.
5056 */
5057 if (!cpu_online(cpu))
5058 goto out;
5059
5060 rq = cpu_rq(cpu);
5061
5062 for (;;) {
5063 /*
5064 * Owner changed, break to re-assess state.
5065 */
5066 if (lock->owner != owner)
5067 break;
5068
5069 /*
5070 * Is that owner really running on that cpu?
5071 */
5072 if (task_thread_info(rq->curr) != owner || need_resched())
5073 return 0;
5074
5075 cpu_relax();
5076 }
5077out:
5078 return 1;
5079}
5080#endif
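
mutex_spin_on_owner() is consumed by the adaptive-spinning path in kernel/mutex.c, part of the same series but not shown in this hunk. Roughly (a paraphrase, not the authoritative code), the slowpath spins like this before deciding to sleep:

	for (;;) {
		struct thread_info *owner;

		/*
		 * If there's an owner, spin only while that owner is
		 * actually running; mutex_spin_on_owner() returns 0 once
		 * the owner blocks or we ought to reschedule ourselves.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;			/* fall back to sleeping */

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			/* got the lock while spinning */
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * No visible owner yet: bail out for RT tasks (or when a
		 * resched is pending) to avoid live-locking against a
		 * preempted owner that never got to set lock->owner.
		 * ('task' is current.)
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		cpu_relax();
	}
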
5081
4618#ifdef CONFIG_PREEMPT 5082#ifdef CONFIG_PREEMPT
4619/* 5083/*
4620 * this is the entry point to schedule() from in-kernel preemption 5084 * this is the entry point to schedule() from in-kernel preemption
@@ -4642,7 +5106,7 @@ asmlinkage void __sched preempt_schedule(void)
4642 * between schedule and now. 5106 * between schedule and now.
4643 */ 5107 */
4644 barrier(); 5108 barrier();
4645 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); 5109 } while (need_resched());
4646} 5110}
4647EXPORT_SYMBOL(preempt_schedule); 5111EXPORT_SYMBOL(preempt_schedule);
4648 5112
@@ -4671,7 +5135,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
4671 * between schedule and now. 5135 * between schedule and now.
4672 */ 5136 */
4673 barrier(); 5137 barrier();
4674 } while (unlikely(test_thread_flag(TIF_NEED_RESCHED))); 5138 } while (need_resched());
4675} 5139}
4676 5140
4677#endif /* CONFIG_PREEMPT */ 5141#endif /* CONFIG_PREEMPT */
@@ -4732,11 +5196,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4732 __wake_up_common(q, mode, 1, 0, NULL); 5196 __wake_up_common(q, mode, 1, 0, NULL);
4733} 5197}
4734 5198
5199void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
5200{
5201 __wake_up_common(q, mode, 1, 0, key);
5202}
5203
4735/** 5204/**
4736 * __wake_up_sync - wake up threads blocked on a waitqueue. 5205 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
4737 * @q: the waitqueue 5206 * @q: the waitqueue
4738 * @mode: which threads 5207 * @mode: which threads
4739 * @nr_exclusive: how many wake-one or wake-many threads to wake up 5208 * @nr_exclusive: how many wake-one or wake-many threads to wake up
5209 * @key: opaque value to be passed to wakeup targets
4740 * 5210 *
 4741 * The sync wakeup differs in that the waker knows that it will schedule 5211 * The sync wakeup differs in that the waker knows that it will schedule
4742 * away soon, so while the target thread will be woken up, it will not 5212 * away soon, so while the target thread will be woken up, it will not
@@ -4745,8 +5215,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4745 * 5215 *
4746 * On UP it can prevent extra preemption. 5216 * On UP it can prevent extra preemption.
4747 */ 5217 */
4748void 5218void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
4749__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) 5219 int nr_exclusive, void *key)
4750{ 5220{
4751 unsigned long flags; 5221 unsigned long flags;
4752 int sync = 1; 5222 int sync = 1;
@@ -4758,9 +5228,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4758 sync = 0; 5228 sync = 0;
4759 5229
4760 spin_lock_irqsave(&q->lock, flags); 5230 spin_lock_irqsave(&q->lock, flags);
4761 __wake_up_common(q, mode, nr_exclusive, sync, NULL); 5231 __wake_up_common(q, mode, nr_exclusive, sync, key);
4762 spin_unlock_irqrestore(&q->lock, flags); 5232 spin_unlock_irqrestore(&q->lock, flags);
4763} 5233}
5234EXPORT_SYMBOL_GPL(__wake_up_sync_key);
5235
5236/*
5237 * __wake_up_sync - see __wake_up_sync_key()
5238 */
5239void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
5240{
5241 __wake_up_sync_key(q, mode, nr_exclusive, NULL);
5242}
4764EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ 5243EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
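
The key ends up as the last argument of each waiter's wake function, so a waiter can cheaply ignore wakeups it is not interested in. A hypothetical filter (my_wake_function and MY_INTEREST_MASK are invented; the wait_queue_func_t signature is the 2.6.x one):

static int my_wake_function(wait_queue_t *wait, unsigned mode,
			    int sync, void *key)
{
	unsigned long events = (unsigned long)key;

	/* 'key' is whatever the waker passed to __wake_up_sync_key() */
	if (key && !(events & MY_INTEREST_MASK))
		return 0;	/* not for us: stay on the queue */

	return autoremove_wake_function(wait, mode, sync, key);
}
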
4765 5244
4766/** 5245/**
@@ -5145,7 +5624,7 @@ SYSCALL_DEFINE1(nice, int, increment)
5145 if (increment > 40) 5624 if (increment > 40)
5146 increment = 40; 5625 increment = 40;
5147 5626
5148 nice = PRIO_TO_NICE(current->static_prio) + increment; 5627 nice = TASK_NICE(current) + increment;
5149 if (nice < -20) 5628 if (nice < -20)
5150 nice = -20; 5629 nice = -20;
5151 if (nice > 19) 5630 if (nice > 19)
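
The TASK_NICE() form is equivalent to the old expression; paraphrasing the include/linux/sched.h definitions:

#define MAX_RT_PRIO		100
#define NICE_TO_PRIO(nice)	(MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)	((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)		PRIO_TO_NICE((p)->static_prio)
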
@@ -5944,12 +6423,7 @@ void sched_show_task(struct task_struct *p)
5944 printk(KERN_CONT " %016lx ", thread_saved_pc(p)); 6423 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
5945#endif 6424#endif
5946#ifdef CONFIG_DEBUG_STACK_USAGE 6425#ifdef CONFIG_DEBUG_STACK_USAGE
5947 { 6426 free = stack_not_used(p);
5948 unsigned long *n = end_of_stack(p);
5949 while (!*n)
5950 n++;
5951 free = (unsigned long)n - (unsigned long)end_of_stack(p);
5952 }
5953#endif 6427#endif
5954 printk(KERN_CONT "%5lu %5d %6d\n", free, 6428 printk(KERN_CONT "%5lu %5d %6d\n", free,
5955 task_pid_nr(p), task_pid_nr(p->real_parent)); 6429 task_pid_nr(p), task_pid_nr(p->real_parent));
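
The removed open-coded scan becomes stack_not_used() in include/linux/sched.h (same series); paraphrased, the helper keeps the identical walk past the still-zero low end of the stack:

static inline unsigned long stack_not_used(struct task_struct *p)
{
	unsigned long *n = end_of_stack(p);

	do {			/* skip the unused (still zero) words */
		n++;
	} while (!*n);

	return (unsigned long)n - (unsigned long)end_of_stack(p);
}
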
@@ -6423,7 +6897,7 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
6423 if (!rq->nr_running) 6897 if (!rq->nr_running)
6424 break; 6898 break;
6425 update_rq_clock(rq); 6899 update_rq_clock(rq);
6426 next = pick_next_task(rq, rq->curr); 6900 next = pick_next_task(rq);
6427 if (!next) 6901 if (!next)
6428 break; 6902 break;
6429 next->sched_class->put_prev_task(rq, next); 6903 next->sched_class->put_prev_task(rq, next);
@@ -8218,11 +8692,15 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
8218 __set_bit(MAX_RT_PRIO, array->bitmap); 8692 __set_bit(MAX_RT_PRIO, array->bitmap);
8219 8693
8220#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 8694#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
8221 rt_rq->highest_prio = MAX_RT_PRIO; 8695 rt_rq->highest_prio.curr = MAX_RT_PRIO;
8696#ifdef CONFIG_SMP
8697 rt_rq->highest_prio.next = MAX_RT_PRIO;
8698#endif
8222#endif 8699#endif
8223#ifdef CONFIG_SMP 8700#ifdef CONFIG_SMP
8224 rt_rq->rt_nr_migratory = 0; 8701 rt_rq->rt_nr_migratory = 0;
8225 rt_rq->overloaded = 0; 8702 rt_rq->overloaded = 0;
8703 plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
8226#endif 8704#endif
8227 8705
8228 rt_rq->rt_time = 0; 8706 rt_rq->rt_time = 0;
@@ -9490,7 +9968,7 @@ cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
9490 9968
9491static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu) 9969static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9492{ 9970{
9493 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); 9971 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9494 u64 data; 9972 u64 data;
9495 9973
9496#ifndef CONFIG_64BIT 9974#ifndef CONFIG_64BIT
@@ -9509,7 +9987,7 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
9509 9987
9510static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val) 9988static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
9511{ 9989{
9512 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); 9990 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9513 9991
9514#ifndef CONFIG_64BIT 9992#ifndef CONFIG_64BIT
9515 /* 9993 /*
@@ -9598,14 +10076,14 @@ static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
9598 struct cpuacct *ca; 10076 struct cpuacct *ca;
9599 int cpu; 10077 int cpu;
9600 10078
9601 if (!cpuacct_subsys.active) 10079 if (unlikely(!cpuacct_subsys.active))
9602 return; 10080 return;
9603 10081
9604 cpu = task_cpu(tsk); 10082 cpu = task_cpu(tsk);
9605 ca = task_ca(tsk); 10083 ca = task_ca(tsk);
9606 10084
9607 for (; ca; ca = ca->parent) { 10085 for (; ca; ca = ca->parent) {
9608 u64 *cpuusage = percpu_ptr(ca->cpuusage, cpu); 10086 u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
9609 *cpuusage += cputime; 10087 *cpuusage += cputime;
9610 } 10088 }
9611} 10089}
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index a0b0852414cc..390f33234bd0 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -24,11 +24,11 @@
24 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat 24 * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat
25 * consistent between cpus (never more than 2 jiffies difference). 25 * consistent between cpus (never more than 2 jiffies difference).
26 */ 26 */
27#include <linux/sched.h>
28#include <linux/percpu.h>
29#include <linux/spinlock.h> 27#include <linux/spinlock.h>
30#include <linux/ktime.h>
31#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/percpu.h>
30#include <linux/ktime.h>
31#include <linux/sched.h>
32 32
33/* 33/*
34 * Scheduler clock - returns current time in nanosec units. 34 * Scheduler clock - returns current time in nanosec units.
@@ -43,6 +43,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
43static __read_mostly int sched_clock_running; 43static __read_mostly int sched_clock_running;
44 44
45#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK 45#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
46__read_mostly int sched_clock_stable;
46 47
47struct sched_clock_data { 48struct sched_clock_data {
48 /* 49 /*
@@ -87,7 +88,7 @@ void sched_clock_init(void)
87} 88}
88 89
89/* 90/*
90 * min,max except they take wrapping into account 91 * min, max except they take wrapping into account
91 */ 92 */
92 93
93static inline u64 wrap_min(u64 x, u64 y) 94static inline u64 wrap_min(u64 x, u64 y)
@@ -111,15 +112,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
111 s64 delta = now - scd->tick_raw; 112 s64 delta = now - scd->tick_raw;
112 u64 clock, min_clock, max_clock; 113 u64 clock, min_clock, max_clock;
113 114
114 WARN_ON_ONCE(!irqs_disabled());
115
116 if (unlikely(delta < 0)) 115 if (unlikely(delta < 0))
117 delta = 0; 116 delta = 0;
118 117
119 /* 118 /*
120 * scd->clock = clamp(scd->tick_gtod + delta, 119 * scd->clock = clamp(scd->tick_gtod + delta,
121 * max(scd->tick_gtod, scd->clock), 120 * max(scd->tick_gtod, scd->clock),
122 * scd->tick_gtod + TICK_NSEC); 121 * scd->tick_gtod + TICK_NSEC);
123 */ 122 */
124 123
125 clock = scd->tick_gtod + delta; 124 clock = scd->tick_gtod + delta;
@@ -148,12 +147,13 @@ static void lock_double_clock(struct sched_clock_data *data1,
148 147
149u64 sched_clock_cpu(int cpu) 148u64 sched_clock_cpu(int cpu)
150{ 149{
151 struct sched_clock_data *scd = cpu_sdc(cpu);
152 u64 now, clock, this_clock, remote_clock; 150 u64 now, clock, this_clock, remote_clock;
151 struct sched_clock_data *scd;
153 152
154 if (unlikely(!sched_clock_running)) 153 if (sched_clock_stable)
155 return 0ull; 154 return sched_clock();
156 155
156 scd = cpu_sdc(cpu);
157 WARN_ON_ONCE(!irqs_disabled()); 157 WARN_ON_ONCE(!irqs_disabled());
158 now = sched_clock(); 158 now = sched_clock();
159 159
@@ -195,14 +195,18 @@ u64 sched_clock_cpu(int cpu)
195 195
196void sched_clock_tick(void) 196void sched_clock_tick(void)
197{ 197{
198 struct sched_clock_data *scd = this_scd(); 198 struct sched_clock_data *scd;
199 u64 now, now_gtod; 199 u64 now, now_gtod;
200 200
201 if (sched_clock_stable)
202 return;
203
201 if (unlikely(!sched_clock_running)) 204 if (unlikely(!sched_clock_running))
202 return; 205 return;
203 206
204 WARN_ON_ONCE(!irqs_disabled()); 207 WARN_ON_ONCE(!irqs_disabled());
205 208
209 scd = this_scd();
206 now_gtod = ktime_to_ns(ktime_get()); 210 now_gtod = ktime_to_ns(ktime_get());
207 now = sched_clock(); 211 now = sched_clock();
208 212
@@ -250,7 +254,7 @@ u64 sched_clock_cpu(int cpu)
250 return sched_clock(); 254 return sched_clock();
251} 255}
252 256
253#endif 257#endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */
254 258
255unsigned long long cpu_clock(int cpu) 259unsigned long long cpu_clock(int cpu)
256{ 260{
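
Nothing in this file sets sched_clock_stable; the intent is for architecture code to flip it when its raw sched_clock() is already globally monotonic (for instance, an invariant synchronized TSC). A hypothetical opt-in (function name and feature test invented for illustration):

void __init my_arch_clock_init(void)
{
	/*
	 * Once set, sched_clock_cpu() and sched_clock_tick() above
	 * short-circuit the per-cpu min/max filtering entirely.
	 */
	if (my_arch_has_invariant_tsc())
		sched_clock_stable = 1;
}
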
diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h
index 642a94ef8a0a..9a7e859b8fbf 100644
--- a/kernel/sched_cpupri.h
+++ b/kernel/sched_cpupri.h
@@ -25,7 +25,7 @@ struct cpupri {
25 25
26#ifdef CONFIG_SMP 26#ifdef CONFIG_SMP
27int cpupri_find(struct cpupri *cp, 27int cpupri_find(struct cpupri *cp,
28 struct task_struct *p, cpumask_t *lowest_mask); 28 struct task_struct *p, struct cpumask *lowest_mask);
29void cpupri_set(struct cpupri *cp, int cpu, int pri); 29void cpupri_set(struct cpupri *cp, int cpu, int pri);
30int cpupri_init(struct cpupri *cp, bool bootmem); 30int cpupri_init(struct cpupri *cp, bool bootmem);
31void cpupri_cleanup(struct cpupri *cp); 31void cpupri_cleanup(struct cpupri *cp);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 16eeba4e4169..467ca72f1657 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -272,7 +272,6 @@ static void print_cpu(struct seq_file *m, int cpu)
272 P(nr_switches); 272 P(nr_switches);
273 P(nr_load_updates); 273 P(nr_load_updates);
274 P(nr_uninterruptible); 274 P(nr_uninterruptible);
275 SEQ_printf(m, " .%-30s: %lu\n", "jiffies", jiffies);
276 PN(next_balance); 275 PN(next_balance);
277 P(curr->pid); 276 P(curr->pid);
278 PN(clock); 277 PN(clock);
@@ -287,9 +286,6 @@ static void print_cpu(struct seq_file *m, int cpu)
287#ifdef CONFIG_SCHEDSTATS 286#ifdef CONFIG_SCHEDSTATS
288#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n); 287#define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
289 288
290 P(yld_exp_empty);
291 P(yld_act_empty);
292 P(yld_both_empty);
293 P(yld_count); 289 P(yld_count);
294 290
295 P(sched_switch); 291 P(sched_switch);
@@ -314,7 +310,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
314 u64 now = ktime_to_ns(ktime_get()); 310 u64 now = ktime_to_ns(ktime_get());
315 int cpu; 311 int cpu;
316 312
317 SEQ_printf(m, "Sched Debug Version: v0.08, %s %.*s\n", 313 SEQ_printf(m, "Sched Debug Version: v0.09, %s %.*s\n",
318 init_utsname()->release, 314 init_utsname()->release,
319 (int)strcspn(init_utsname()->version, " "), 315 (int)strcspn(init_utsname()->version, " "),
320 init_utsname()->version); 316 init_utsname()->version);
@@ -325,6 +321,7 @@ static int sched_debug_show(struct seq_file *m, void *v)
325 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x)) 321 SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
326#define PN(x) \ 322#define PN(x) \
327 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x)) 323 SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
324 P(jiffies);
328 PN(sysctl_sched_latency); 325 PN(sysctl_sched_latency);
329 PN(sysctl_sched_min_granularity); 326 PN(sysctl_sched_min_granularity);
330 PN(sysctl_sched_wakeup_granularity); 327 PN(sysctl_sched_wakeup_granularity);
@@ -397,6 +394,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
397 PN(se.vruntime); 394 PN(se.vruntime);
398 PN(se.sum_exec_runtime); 395 PN(se.sum_exec_runtime);
399 PN(se.avg_overlap); 396 PN(se.avg_overlap);
397 PN(se.avg_wakeup);
400 398
401 nr_switches = p->nvcsw + p->nivcsw; 399 nr_switches = p->nvcsw + p->nivcsw;
402 400
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 0566f2a03c42..3816f217f119 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1314,16 +1314,63 @@ out:
1314} 1314}
1315#endif /* CONFIG_SMP */ 1315#endif /* CONFIG_SMP */
1316 1316
1317static unsigned long wakeup_gran(struct sched_entity *se) 1317/*
1318 * Adaptive granularity
1319 *
1320 * se->avg_wakeup gives the average time a task runs until it does a wakeup,
 1321 * with wakeup_gran as the limit for a task that never does a wakeup.
1322 *
 1323 * So the smaller avg_wakeup is, the faster we want this task to preempt,
1324 * but we don't want to treat the preemptee unfairly and therefore allow it
1325 * to run for at least the amount of time we'd like to run.
1326 *
1327 * NOTE: we use 2*avg_wakeup to increase the probability of actually doing one
1328 *
 1329 * NOTE: we use *nr_running to scale with load; this nicely matches the
1330 * degrading latency on load.
1331 */
1332static unsigned long
1333adaptive_gran(struct sched_entity *curr, struct sched_entity *se)
1334{
1335 u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
1336 u64 expected_wakeup = 2*se->avg_wakeup * cfs_rq_of(se)->nr_running;
1337 u64 gran = 0;
1338
1339 if (this_run < expected_wakeup)
1340 gran = expected_wakeup - this_run;
1341
1342 return min_t(s64, gran, sysctl_sched_wakeup_granularity);
1343}
1344
1345static unsigned long
1346wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
1318{ 1347{
1319 unsigned long gran = sysctl_sched_wakeup_granularity; 1348 unsigned long gran = sysctl_sched_wakeup_granularity;
1320 1349
1350 if (cfs_rq_of(curr)->curr && sched_feat(ADAPTIVE_GRAN))
1351 gran = adaptive_gran(curr, se);
1352
1321 /* 1353 /*
 1322 * More easily preempt - nice tasks, while not making it harder for 1354 * Since it's curr that is running now, convert the gran from real-time
 1323 * + nice tasks. 1355 * to virtual-time in its units.
1324 */ 1356 */
1325 if (!sched_feat(ASYM_GRAN) || se->load.weight > NICE_0_LOAD) 1357 if (sched_feat(ASYM_GRAN)) {
1326 gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); 1358 /*
1359 * By using 'se' instead of 'curr' we penalize light tasks, so
 1360 * they get preempted more easily. That is, if 'se' < 'curr' then
 1361 * the resulting gran will be larger, therefore penalizing the
 1362 * lighter task; if, on the other hand, 'se' > 'curr', then the resulting
 1363 * gran will be smaller, again penalizing the lighter task.
1364 *
1365 * This is especially important for buddies when the leftmost
1366 * task is higher priority than the buddy.
1367 */
1368 if (unlikely(se->load.weight != NICE_0_LOAD))
1369 gran = calc_delta_fair(gran, se);
1370 } else {
1371 if (unlikely(curr->load.weight != NICE_0_LOAD))
1372 gran = calc_delta_fair(gran, curr);
1373 }
1327 1374
1328 return gran; 1375 return gran;
1329} 1376}
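
A standalone restatement of adaptive_gran() with invented numbers: a task that wakes someone every 1 ms, on a cfs_rq with two tasks, gives expected_wakeup = 2 * 1 ms * 2 = 4 ms; if curr has run 0.5 ms since its last preemption, the granularity shrinks to 3.5 ms (still capped by the sysctl):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_MSEC 1000000ULL

static uint64_t adaptive_gran(uint64_t this_run, uint64_t avg_wakeup,
			      uint64_t nr_running, uint64_t wakeup_gran)
{
	uint64_t expected_wakeup = 2 * avg_wakeup * nr_running;
	uint64_t gran = 0;

	if (this_run < expected_wakeup)
		gran = expected_wakeup - this_run;

	return gran < wakeup_gran ? gran : wakeup_gran;
}

int main(void)
{
	uint64_t gran = adaptive_gran(NSEC_PER_MSEC / 2,	/* ran 0.5 ms */
				      NSEC_PER_MSEC,		/* wakes every 1 ms */
				      2,			/* cfs_rq nr_running */
				      5 * NSEC_PER_MSEC);	/* sysctl cap */

	printf("gran = %.1f ms\n", gran / 1e6);			/* 3.5 ms */
	return 0;
}
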
@@ -1350,7 +1397,7 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1350 if (vdiff <= 0) 1397 if (vdiff <= 0)
1351 return -1; 1398 return -1;
1352 1399
1353 gran = wakeup_gran(curr); 1400 gran = wakeup_gran(curr, se);
1354 if (vdiff > gran) 1401 if (vdiff > gran)
1355 return 1; 1402 return 1;
1356 1403
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index da5d93b5d2c6..4569bfa7df9b 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -1,5 +1,6 @@
1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1) 1SCHED_FEAT(NEW_FAIR_SLEEPERS, 1)
2SCHED_FEAT(NORMALIZED_SLEEPER, 1) 2SCHED_FEAT(NORMALIZED_SLEEPER, 0)
3SCHED_FEAT(ADAPTIVE_GRAN, 1)
3SCHED_FEAT(WAKEUP_PREEMPT, 1) 4SCHED_FEAT(WAKEUP_PREEMPT, 1)
4SCHED_FEAT(START_DEBIT, 1) 5SCHED_FEAT(START_DEBIT, 1)
5SCHED_FEAT(AFFINE_WAKEUPS, 1) 6SCHED_FEAT(AFFINE_WAKEUPS, 1)
@@ -13,3 +14,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
13SCHED_FEAT(ASYM_EFF_LOAD, 1) 14SCHED_FEAT(ASYM_EFF_LOAD, 1)
14SCHED_FEAT(WAKEUP_OVERLAP, 0) 15SCHED_FEAT(WAKEUP_OVERLAP, 0)
15SCHED_FEAT(LAST_BUDDY, 1) 16SCHED_FEAT(LAST_BUDDY, 1)
17SCHED_FEAT(OWNER_SPIN, 1)
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bac1061cea2f..299d012b4394 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -3,6 +3,40 @@
3 * policies) 3 * policies)
4 */ 4 */
5 5
6static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
7{
8 return container_of(rt_se, struct task_struct, rt);
9}
10
11#ifdef CONFIG_RT_GROUP_SCHED
12
13static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
14{
15 return rt_rq->rq;
16}
17
18static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
19{
20 return rt_se->rt_rq;
21}
22
23#else /* CONFIG_RT_GROUP_SCHED */
24
25static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
26{
27 return container_of(rt_rq, struct rq, rt);
28}
29
30static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
31{
32 struct task_struct *p = rt_task_of(rt_se);
33 struct rq *rq = task_rq(p);
34
35 return &rq->rt;
36}
37
38#endif /* CONFIG_RT_GROUP_SCHED */
39
6#ifdef CONFIG_SMP 40#ifdef CONFIG_SMP
7 41
8static inline int rt_overloaded(struct rq *rq) 42static inline int rt_overloaded(struct rq *rq)
@@ -37,25 +71,69 @@ static inline void rt_clear_overload(struct rq *rq)
37 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask); 71 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
38} 72}
39 73
40static void update_rt_migration(struct rq *rq) 74static void update_rt_migration(struct rt_rq *rt_rq)
41{ 75{
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) { 76 if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
43 if (!rq->rt.overloaded) { 77 if (!rt_rq->overloaded) {
44 rt_set_overload(rq); 78 rt_set_overload(rq_of_rt_rq(rt_rq));
45 rq->rt.overloaded = 1; 79 rt_rq->overloaded = 1;
46 } 80 }
47 } else if (rq->rt.overloaded) { 81 } else if (rt_rq->overloaded) {
48 rt_clear_overload(rq); 82 rt_clear_overload(rq_of_rt_rq(rt_rq));
49 rq->rt.overloaded = 0; 83 rt_rq->overloaded = 0;
50 } 84 }
51} 85}
52#endif /* CONFIG_SMP */
53 86
54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) 87static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
88{
89 if (rt_se->nr_cpus_allowed > 1)
90 rt_rq->rt_nr_migratory++;
91
92 update_rt_migration(rt_rq);
93}
94
95static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
96{
97 if (rt_se->nr_cpus_allowed > 1)
98 rt_rq->rt_nr_migratory--;
99
100 update_rt_migration(rt_rq);
101}
102
103static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
104{
105 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
106 plist_node_init(&p->pushable_tasks, p->prio);
107 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
108}
109
110static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
111{
112 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
113}
114
115#else
116
117static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
55{ 118{
56 return container_of(rt_se, struct task_struct, rt);
57} 119}
58 120
121static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
122{
123}
124
125static inline
126void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
127{
128}
129
130static inline
131void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
132{
133}
134
135#endif /* CONFIG_SMP */
136
59static inline int on_rt_rq(struct sched_rt_entity *rt_se) 137static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60{ 138{
61 return !list_empty(&rt_se->run_list); 139 return !list_empty(&rt_se->run_list);
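
enqueue_pushable_task()/dequeue_pushable_task() above lean on <linux/plist.h>, which keeps nodes sorted by priority, so the list head is always the best push candidate (re-adding a task re-sorts it after a priority change). A toy userspace analogue of the ordering, purely illustrative:

#include <stdio.h>

struct ptask {
	int prio;		/* lower value = higher RT priority */
	struct ptask *next;
};

static void enqueue_pushable(struct ptask **head, struct ptask *p)
{
	while (*head && (*head)->prio <= p->prio)
		head = &(*head)->next;
	p->next = *head;	/* insert, keeping ascending prio order */
	*head = p;
}

int main(void)
{
	struct ptask a = { .prio = 40 }, b = { .prio = 10 }, c = { .prio = 25 };
	struct ptask *head = NULL;

	enqueue_pushable(&head, &a);
	enqueue_pushable(&head, &b);
	enqueue_pushable(&head, &c);

	/* like plist_first_entry(): the highest-priority pushable task */
	printf("next pushable prio: %d\n", head->prio);	/* prints 10 */
	return 0;
}
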
@@ -79,16 +157,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
79#define for_each_leaf_rt_rq(rt_rq, rq) \ 157#define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) 158 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81 159
82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83{
84 return rt_rq->rq;
85}
86
87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88{
89 return rt_se->rt_rq;
90}
91
92#define for_each_sched_rt_entity(rt_se) \ 160#define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent) 161 for (; rt_se; rt_se = rt_se->parent)
94 162
@@ -108,7 +176,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
108 if (rt_rq->rt_nr_running) { 176 if (rt_rq->rt_nr_running) {
109 if (rt_se && !on_rt_rq(rt_se)) 177 if (rt_se && !on_rt_rq(rt_se))
110 enqueue_rt_entity(rt_se); 178 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio < curr->prio) 179 if (rt_rq->highest_prio.curr < curr->prio)
112 resched_task(curr); 180 resched_task(curr);
113 } 181 }
114} 182}
@@ -176,19 +244,6 @@ static inline u64 sched_rt_period(struct rt_rq *rt_rq)
176#define for_each_leaf_rt_rq(rt_rq, rq) \ 244#define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) 245 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178 246
179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180{
181 return container_of(rt_rq, struct rq, rt);
182}
183
184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185{
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
188
189 return &rq->rt;
190}
191
192#define for_each_sched_rt_entity(rt_se) \ 247#define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL) 248 for (; rt_se; rt_se = NULL)
194 249
@@ -473,7 +528,7 @@ static inline int rt_se_prio(struct sched_rt_entity *rt_se)
473 struct rt_rq *rt_rq = group_rt_rq(rt_se); 528 struct rt_rq *rt_rq = group_rt_rq(rt_se);
474 529
475 if (rt_rq) 530 if (rt_rq)
476 return rt_rq->highest_prio; 531 return rt_rq->highest_prio.curr;
477#endif 532#endif
478 533
479 return rt_task_of(rt_se)->prio; 534 return rt_task_of(rt_se)->prio;
@@ -547,91 +602,174 @@ static void update_curr_rt(struct rq *rq)
547 } 602 }
548} 603}
549 604
550static inline 605#if defined CONFIG_SMP
551void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 606
607static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu);
608
609static inline int next_prio(struct rq *rq)
552{ 610{
553 WARN_ON(!rt_prio(rt_se_prio(rt_se))); 611 struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu);
554 rt_rq->rt_nr_running++; 612
555#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 613 if (next && rt_prio(next->prio))
556 if (rt_se_prio(rt_se) < rt_rq->highest_prio) { 614 return next->prio;
557#ifdef CONFIG_SMP 615 else
558 struct rq *rq = rq_of_rt_rq(rt_rq); 616 return MAX_RT_PRIO;
559#endif 617}
618
619static void
620inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
621{
622 struct rq *rq = rq_of_rt_rq(rt_rq);
623
624 if (prio < prev_prio) {
625
626 /*
627 * If the new task is higher in priority than anything on the
628 * run-queue, we know that the previous high becomes our
629 * next-highest.
630 */
631 rt_rq->highest_prio.next = prev_prio;
560 632
561 rt_rq->highest_prio = rt_se_prio(rt_se);
562#ifdef CONFIG_SMP
563 if (rq->online) 633 if (rq->online)
564 cpupri_set(&rq->rd->cpupri, rq->cpu, 634 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
565 rt_se_prio(rt_se));
566#endif
567 }
568#endif
569#ifdef CONFIG_SMP
570 if (rt_se->nr_cpus_allowed > 1) {
571 struct rq *rq = rq_of_rt_rq(rt_rq);
572 635
573 rq->rt.rt_nr_migratory++; 636 } else if (prio == rt_rq->highest_prio.curr)
574 } 637 /*
638 * If the next task is equal in priority to the highest on
639 * the run-queue, then we implicitly know that the next highest
640 * task cannot be any lower than current
641 */
642 rt_rq->highest_prio.next = prio;
643 else if (prio < rt_rq->highest_prio.next)
644 /*
645 * Otherwise, we need to recompute next-highest
646 */
647 rt_rq->highest_prio.next = next_prio(rq);
648}
575 649
576 update_rt_migration(rq_of_rt_rq(rt_rq)); 650static void
577#endif 651dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
578#ifdef CONFIG_RT_GROUP_SCHED 652{
579 if (rt_se_boosted(rt_se)) 653 struct rq *rq = rq_of_rt_rq(rt_rq);
580 rt_rq->rt_nr_boosted++;
581 654
582 if (rt_rq->tg) 655 if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next))
583 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); 656 rt_rq->highest_prio.next = next_prio(rq);
584#else 657
585 start_rt_bandwidth(&def_rt_bandwidth); 658 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
586#endif 659 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
587} 660}
588 661
662#else /* CONFIG_SMP */
663
589static inline 664static inline
590void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 665void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
591{ 666static inline
592#ifdef CONFIG_SMP 667void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
593 int highest_prio = rt_rq->highest_prio; 668
594#endif 669#endif /* CONFIG_SMP */
595 670
596 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
597 WARN_ON(!rt_rq->rt_nr_running);
598 rt_rq->rt_nr_running--;
599#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED 671#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
672static void
673inc_rt_prio(struct rt_rq *rt_rq, int prio)
674{
675 int prev_prio = rt_rq->highest_prio.curr;
676
677 if (prio < prev_prio)
678 rt_rq->highest_prio.curr = prio;
679
680 inc_rt_prio_smp(rt_rq, prio, prev_prio);
681}
682
683static void
684dec_rt_prio(struct rt_rq *rt_rq, int prio)
685{
686 int prev_prio = rt_rq->highest_prio.curr;
687
600 if (rt_rq->rt_nr_running) { 688 if (rt_rq->rt_nr_running) {
601 struct rt_prio_array *array;
602 689
603 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio); 690 WARN_ON(prio < prev_prio);
604 if (rt_se_prio(rt_se) == rt_rq->highest_prio) { 691
605 /* recalculate */ 692 /*
606 array = &rt_rq->active; 693 * This may have been our highest task, and therefore
607 rt_rq->highest_prio = 694 * we may have some recomputation to do
695 */
696 if (prio == prev_prio) {
697 struct rt_prio_array *array = &rt_rq->active;
698
699 rt_rq->highest_prio.curr =
608 sched_find_first_bit(array->bitmap); 700 sched_find_first_bit(array->bitmap);
609 } /* otherwise leave rq->highest prio alone */ 701 }
702
610 } else 703 } else
611 rt_rq->highest_prio = MAX_RT_PRIO; 704 rt_rq->highest_prio.curr = MAX_RT_PRIO;
612#endif
613#ifdef CONFIG_SMP
614 if (rt_se->nr_cpus_allowed > 1) {
615 struct rq *rq = rq_of_rt_rq(rt_rq);
616 rq->rt.rt_nr_migratory--;
617 }
618 705
619 if (rt_rq->highest_prio != highest_prio) { 706 dec_rt_prio_smp(rt_rq, prio, prev_prio);
620 struct rq *rq = rq_of_rt_rq(rt_rq); 707}
621 708
622 if (rq->online) 709#else
623 cpupri_set(&rq->rd->cpupri, rq->cpu, 710
624 rt_rq->highest_prio); 711static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
625 } 712static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
713
714#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
626 715
627 update_rt_migration(rq_of_rt_rq(rt_rq));
628#endif /* CONFIG_SMP */
629#ifdef CONFIG_RT_GROUP_SCHED 716#ifdef CONFIG_RT_GROUP_SCHED
717
718static void
719inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
720{
721 if (rt_se_boosted(rt_se))
722 rt_rq->rt_nr_boosted++;
723
724 if (rt_rq->tg)
725 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
726}
727
728static void
729dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
730{
630 if (rt_se_boosted(rt_se)) 731 if (rt_se_boosted(rt_se))
631 rt_rq->rt_nr_boosted--; 732 rt_rq->rt_nr_boosted--;
632 733
633 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); 734 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
634#endif 735}
736
737#else /* CONFIG_RT_GROUP_SCHED */
738
739static void
740inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
741{
742 start_rt_bandwidth(&def_rt_bandwidth);
743}
744
745static inline
746void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
747
748#endif /* CONFIG_RT_GROUP_SCHED */
749
750static inline
751void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
752{
753 int prio = rt_se_prio(rt_se);
754
755 WARN_ON(!rt_prio(prio));
756 rt_rq->rt_nr_running++;
757
758 inc_rt_prio(rt_rq, prio);
759 inc_rt_migration(rt_se, rt_rq);
760 inc_rt_group(rt_se, rt_rq);
761}
762
763static inline
764void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
765{
766 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
767 WARN_ON(!rt_rq->rt_nr_running);
768 rt_rq->rt_nr_running--;
769
770 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
771 dec_rt_migration(rt_se, rt_rq);
772 dec_rt_group(rt_se, rt_rq);
635} 773}
636 774
637static void __enqueue_rt_entity(struct sched_rt_entity *rt_se) 775static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
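
The highest_prio.curr/.next pair used above comes from the struct rt_rq change in sched.c (its initialization is visible in the init_rt_rq hunk earlier in this patch); paraphrased, the old scalar becomes:

	/* was: int highest_prio; */
	struct {
		int curr;	/* highest queued rt task prio */
#ifdef CONFIG_SMP
		int next;	/* next highest: lets pull_rt_task() skip a
				 * remote rq without taking its lock */
#endif
	} highest_prio;
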
@@ -718,6 +856,9 @@ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
718 856
719 enqueue_rt_entity(rt_se); 857 enqueue_rt_entity(rt_se);
720 858
859 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1)
860 enqueue_pushable_task(rq, p);
861
721 inc_cpu_load(rq, p->se.load.weight); 862 inc_cpu_load(rq, p->se.load.weight);
722} 863}
723 864
@@ -728,6 +869,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
728 update_curr_rt(rq); 869 update_curr_rt(rq);
729 dequeue_rt_entity(rt_se); 870 dequeue_rt_entity(rt_se);
730 871
872 dequeue_pushable_task(rq, p);
873
731 dec_cpu_load(rq, p->se.load.weight); 874 dec_cpu_load(rq, p->se.load.weight);
732} 875}
733 876
@@ -878,7 +1021,7 @@ static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
878 return next; 1021 return next;
879} 1022}
880 1023
881static struct task_struct *pick_next_task_rt(struct rq *rq) 1024static struct task_struct *_pick_next_task_rt(struct rq *rq)
882{ 1025{
883 struct sched_rt_entity *rt_se; 1026 struct sched_rt_entity *rt_se;
884 struct task_struct *p; 1027 struct task_struct *p;
@@ -900,6 +1043,18 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
900 1043
901 p = rt_task_of(rt_se); 1044 p = rt_task_of(rt_se);
902 p->se.exec_start = rq->clock; 1045 p->se.exec_start = rq->clock;
1046
1047 return p;
1048}
1049
1050static struct task_struct *pick_next_task_rt(struct rq *rq)
1051{
1052 struct task_struct *p = _pick_next_task_rt(rq);
1053
1054 /* The running task is never eligible for pushing */
1055 if (p)
1056 dequeue_pushable_task(rq, p);
1057
903 return p; 1058 return p;
904} 1059}
905 1060
@@ -907,6 +1062,13 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
907{ 1062{
908 update_curr_rt(rq); 1063 update_curr_rt(rq);
909 p->se.exec_start = 0; 1064 p->se.exec_start = 0;
1065
1066 /*
1067 * The previous task needs to be made eligible for pushing
1068 * if it is still active
1069 */
1070 if (p->se.on_rq && p->rt.nr_cpus_allowed > 1)
1071 enqueue_pushable_task(rq, p);
910} 1072}
911 1073
912#ifdef CONFIG_SMP 1074#ifdef CONFIG_SMP
@@ -960,12 +1122,13 @@ static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
960 1122
961static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); 1123static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
962 1124
963static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask) 1125static inline int pick_optimal_cpu(int this_cpu,
1126 const struct cpumask *mask)
964{ 1127{
965 int first; 1128 int first;
966 1129
967 /* "this_cpu" is cheaper to preempt than a remote processor */ 1130 /* "this_cpu" is cheaper to preempt than a remote processor */
968 if ((this_cpu != -1) && cpu_isset(this_cpu, *mask)) 1131 if ((this_cpu != -1) && cpumask_test_cpu(this_cpu, mask))
969 return this_cpu; 1132 return this_cpu;
970 1133
971 first = cpumask_first(mask); 1134 first = cpumask_first(mask);
@@ -981,6 +1144,7 @@ static int find_lowest_rq(struct task_struct *task)
981 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); 1144 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
982 int this_cpu = smp_processor_id(); 1145 int this_cpu = smp_processor_id();
983 int cpu = task_cpu(task); 1146 int cpu = task_cpu(task);
1147 cpumask_var_t domain_mask;
984 1148
985 if (task->rt.nr_cpus_allowed == 1) 1149 if (task->rt.nr_cpus_allowed == 1)
986 return -1; /* No other targets possible */ 1150 return -1; /* No other targets possible */
@@ -1013,19 +1177,25 @@ static int find_lowest_rq(struct task_struct *task)
1013 if (this_cpu == cpu) 1177 if (this_cpu == cpu)
1014 this_cpu = -1; /* Skip this_cpu opt if the same */ 1178 this_cpu = -1; /* Skip this_cpu opt if the same */
1015 1179
1016 for_each_domain(cpu, sd) { 1180 if (alloc_cpumask_var(&domain_mask, GFP_ATOMIC)) {
1017 if (sd->flags & SD_WAKE_AFFINE) { 1181 for_each_domain(cpu, sd) {
1018 cpumask_t domain_mask; 1182 if (sd->flags & SD_WAKE_AFFINE) {
1019 int best_cpu; 1183 int best_cpu;
1184
1185 cpumask_and(domain_mask,
1186 sched_domain_span(sd),
1187 lowest_mask);
1020 1188
1021 cpumask_and(&domain_mask, sched_domain_span(sd), 1189 best_cpu = pick_optimal_cpu(this_cpu,
1022 lowest_mask); 1190 domain_mask);
1023 1191
1024 best_cpu = pick_optimal_cpu(this_cpu, 1192 if (best_cpu != -1) {
1025 &domain_mask); 1193 free_cpumask_var(domain_mask);
1026 if (best_cpu != -1) 1194 return best_cpu;
1027 return best_cpu; 1195 }
1196 }
1028 } 1197 }
1198 free_cpumask_var(domain_mask);
1029 } 1199 }
1030 1200
1031 /* 1201 /*
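
The cpumask_var_t dance replaces an on-stack cpumask_t: with CONFIG_CPUMASK_OFFSTACK the mask is heap-allocated, so huge NR_CPUS configurations stop burning stack in this hot path. The general pattern, as a sketch:

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_ATOMIC))
		return -1;		/* degrade gracefully on failure */

	cpumask_and(mask, sched_domain_span(sd), lowest_mask);
	/* ... use mask ... */

	free_cpumask_var(mask);
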
@@ -1072,7 +1242,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1072 } 1242 }
1073 1243
1074 /* If this rq is still suitable use it. */ 1244 /* If this rq is still suitable use it. */
1075 if (lowest_rq->rt.highest_prio > task->prio) 1245 if (lowest_rq->rt.highest_prio.curr > task->prio)
1076 break; 1246 break;
1077 1247
1078 /* try again */ 1248 /* try again */
@@ -1083,6 +1253,31 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1083 return lowest_rq; 1253 return lowest_rq;
1084} 1254}
1085 1255
1256static inline int has_pushable_tasks(struct rq *rq)
1257{
1258 return !plist_head_empty(&rq->rt.pushable_tasks);
1259}
1260
1261static struct task_struct *pick_next_pushable_task(struct rq *rq)
1262{
1263 struct task_struct *p;
1264
1265 if (!has_pushable_tasks(rq))
1266 return NULL;
1267
1268 p = plist_first_entry(&rq->rt.pushable_tasks,
1269 struct task_struct, pushable_tasks);
1270
1271 BUG_ON(rq->cpu != task_cpu(p));
1272 BUG_ON(task_current(rq, p));
1273 BUG_ON(p->rt.nr_cpus_allowed <= 1);
1274
1275 BUG_ON(!p->se.on_rq);
1276 BUG_ON(!rt_task(p));
1277
1278 return p;
1279}
1280
1086/* 1281/*
1087 * If the current CPU has more than one RT task, see if the non 1282 * If the current CPU has more than one RT task, see if the non
1088 * running task can migrate over to a CPU that is running a task 1283 * running task can migrate over to a CPU that is running a task
@@ -1092,13 +1287,11 @@ static int push_rt_task(struct rq *rq)
1092{ 1287{
1093 struct task_struct *next_task; 1288 struct task_struct *next_task;
1094 struct rq *lowest_rq; 1289 struct rq *lowest_rq;
1095 int ret = 0;
1096 int paranoid = RT_MAX_TRIES;
1097 1290
1098 if (!rq->rt.overloaded) 1291 if (!rq->rt.overloaded)
1099 return 0; 1292 return 0;
1100 1293
1101 next_task = pick_next_highest_task_rt(rq, -1); 1294 next_task = pick_next_pushable_task(rq);
1102 if (!next_task) 1295 if (!next_task)
1103 return 0; 1296 return 0;
1104 1297
@@ -1127,16 +1320,34 @@ static int push_rt_task(struct rq *rq)
1127 struct task_struct *task; 1320 struct task_struct *task;
1128 /* 1321 /*
1129 * find lock_lowest_rq releases rq->lock 1322 * find lock_lowest_rq releases rq->lock
1130 * so it is possible that next_task has changed. 1323 * so it is possible that next_task has migrated.
1131 * If it has, then try again. 1324 *
1325 * We need to make sure that the task is still on the same
1326 * run-queue and is also still the next task eligible for
1327 * pushing.
1132 */ 1328 */
1133 task = pick_next_highest_task_rt(rq, -1); 1329 task = pick_next_pushable_task(rq);
1134 if (unlikely(task != next_task) && task && paranoid--) { 1330 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1135 put_task_struct(next_task); 1331 /*
 1136 next_task = task; 1332 * If we get here, the task hasn't moved at all, but
1137 goto retry; 1333 * it has failed to push. We will not try again,
1334 * since the other cpus will pull from us when they
1335 * are ready.
1336 */
1337 dequeue_pushable_task(rq, next_task);
1338 goto out;
1138 } 1339 }
1139 goto out; 1340
1341 if (!task)
1342 /* No more tasks, just exit */
1343 goto out;
1344
1345 /*
1346 * Something has shifted, try again.
1347 */
1348 put_task_struct(next_task);
1349 next_task = task;
1350 goto retry;
1140 } 1351 }
1141 1352
1142 deactivate_task(rq, next_task, 0); 1353 deactivate_task(rq, next_task, 0);
@@ -1147,23 +1358,12 @@ static int push_rt_task(struct rq *rq)
1147 1358
1148 double_unlock_balance(rq, lowest_rq); 1359 double_unlock_balance(rq, lowest_rq);
1149 1360
1150 ret = 1;
1151out: 1361out:
1152 put_task_struct(next_task); 1362 put_task_struct(next_task);
1153 1363
1154 return ret; 1364 return 1;
1155} 1365}
1156 1366
1157/*
1158 * TODO: Currently we just use the second highest prio task on
1159 * the queue, and stop when it can't migrate (or there's
1160 * no more RT tasks). There may be a case where a lower
1161 * priority RT task has a different affinity than the
1162 * higher RT task. In this case the lower RT task could
1163 * possibly be able to migrate where as the higher priority
1164 * RT task could not. We currently ignore this issue.
1165 * Enhancements are welcome!
1166 */
1167static void push_rt_tasks(struct rq *rq) 1367static void push_rt_tasks(struct rq *rq)
1168{ 1368{
1169 /* push_rt_task will return true if it moved an RT */ 1369 /* push_rt_task will return true if it moved an RT */
@@ -1174,33 +1374,35 @@ static void push_rt_tasks(struct rq *rq)
1174static int pull_rt_task(struct rq *this_rq) 1374static int pull_rt_task(struct rq *this_rq)
1175{ 1375{
1176 int this_cpu = this_rq->cpu, ret = 0, cpu; 1376 int this_cpu = this_rq->cpu, ret = 0, cpu;
1177 struct task_struct *p, *next; 1377 struct task_struct *p;
1178 struct rq *src_rq; 1378 struct rq *src_rq;
1179 1379
1180 if (likely(!rt_overloaded(this_rq))) 1380 if (likely(!rt_overloaded(this_rq)))
1181 return 0; 1381 return 0;
1182 1382
1183 next = pick_next_task_rt(this_rq);
1184
1185 for_each_cpu(cpu, this_rq->rd->rto_mask) { 1383 for_each_cpu(cpu, this_rq->rd->rto_mask) {
1186 if (this_cpu == cpu) 1384 if (this_cpu == cpu)
1187 continue; 1385 continue;
1188 1386
1189 src_rq = cpu_rq(cpu); 1387 src_rq = cpu_rq(cpu);
1388
1389 /*
1390 * Don't bother taking the src_rq->lock if the next highest
1391 * task is known to be lower-priority than our current task.
1392 * This may look racy, but if this value is about to go
1393 * logically higher, the src_rq will push this task away.
 1394 * And if it's going logically lower, we do not care
1395 */
1396 if (src_rq->rt.highest_prio.next >=
1397 this_rq->rt.highest_prio.curr)
1398 continue;
1399
1190 /* 1400 /*
1191 * We can potentially drop this_rq's lock in 1401 * We can potentially drop this_rq's lock in
1192 * double_lock_balance, and another CPU could 1402 * double_lock_balance, and another CPU could
1193 * steal our next task - hence we must cause 1403 * alter this_rq
1194 * the caller to recalculate the next task
1195 * in that case:
1196 */ 1404 */
1197 if (double_lock_balance(this_rq, src_rq)) { 1405 double_lock_balance(this_rq, src_rq);
1198 struct task_struct *old_next = next;
1199
1200 next = pick_next_task_rt(this_rq);
1201 if (next != old_next)
1202 ret = 1;
1203 }
1204 1406
1205 /* 1407 /*
1206 * Are there still pullable RT tasks? 1408 * Are there still pullable RT tasks?
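
The comment above refers to double_lock_balance(), defined elsewhere in sched.c: to keep lock ordering it may drop this_rq->lock before retaking both locks, which is exactly the window in which this_rq can change underneath us. Paraphrased:

static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
	int ret = 0;

	if (unlikely(!spin_trylock(&busiest->lock))) {
		if (busiest < this_rq) {
			/* drop and retake in address order: racy window */
			spin_unlock(&this_rq->lock);
			spin_lock(&busiest->lock);
			spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
			ret = 1;	/* caller must re-check its state */
		} else
			spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
	}
	return ret;
}
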
@@ -1214,7 +1416,7 @@ static int pull_rt_task(struct rq *this_rq)
1214 * Do we have an RT task that preempts 1416 * Do we have an RT task that preempts
1215 * the to-be-scheduled task? 1417 * the to-be-scheduled task?
1216 */ 1418 */
1217 if (p && (!next || (p->prio < next->prio))) { 1419 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
1218 WARN_ON(p == src_rq->curr); 1420 WARN_ON(p == src_rq->curr);
1219 WARN_ON(!p->se.on_rq); 1421 WARN_ON(!p->se.on_rq);
1220 1422
@@ -1224,12 +1426,9 @@ static int pull_rt_task(struct rq *this_rq)
 1224 * This is just that p is waking up and hasn't 1426 * This is just that p is waking up and hasn't
1225 * had a chance to schedule. We only pull 1427 * had a chance to schedule. We only pull
1226 * p if it is lower in priority than the 1428 * p if it is lower in priority than the
1227 * current task on the run queue or 1429 * current task on the run queue
1228 * this_rq next task is lower in prio than
1229 * the current task on that rq.
1230 */ 1430 */
1231 if (p->prio < src_rq->curr->prio || 1431 if (p->prio < src_rq->curr->prio)
1232 (next && next->prio < src_rq->curr->prio))
1233 goto skip; 1432 goto skip;
1234 1433
1235 ret = 1; 1434 ret = 1;
@@ -1242,13 +1441,7 @@ static int pull_rt_task(struct rq *this_rq)
1242 * case there's an even higher prio task 1441 * case there's an even higher prio task
 1243 * in another runqueue. (low likelihood 1442 * in another runqueue. (low likelihood
1244 * but possible) 1443 * but possible)
1245 *
1246 * Update next so that we won't pick a task
1247 * on another cpu with a priority lower (or equal)
1248 * than the one we just picked.
1249 */ 1444 */
1250 next = p;
1251
1252 } 1445 }
1253 skip: 1446 skip:
1254 double_unlock_balance(this_rq, src_rq); 1447 double_unlock_balance(this_rq, src_rq);
@@ -1260,24 +1453,27 @@ static int pull_rt_task(struct rq *this_rq)
1260static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) 1453static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1261{ 1454{
1262 /* Try to pull RT tasks here if we lower this rq's prio */ 1455 /* Try to pull RT tasks here if we lower this rq's prio */
1263 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio) 1456 if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
1264 pull_rt_task(rq); 1457 pull_rt_task(rq);
1265} 1458}
1266 1459
1460/*
1461 * assumes rq->lock is held
1462 */
1463static int needs_post_schedule_rt(struct rq *rq)
1464{
1465 return has_pushable_tasks(rq);
1466}
1467
1267static void post_schedule_rt(struct rq *rq) 1468static void post_schedule_rt(struct rq *rq)
1268{ 1469{
1269 /* 1470 /*
1270 * If we have more than one rt_task queued, then 1471 * This is only called if needs_post_schedule_rt() indicates that
1271 * see if we can push the other rt_tasks off to other CPUS. 1472 * we need to push tasks away
1272 * Note we may release the rq lock, and since
1273 * the lock was owned by prev, we need to release it
 1274 * first via finish_lock_switch and then reacquire it here.
1275 */ 1473 */
1276 if (unlikely(rq->rt.overloaded)) { 1474 spin_lock_irq(&rq->lock);
1277 spin_lock_irq(&rq->lock); 1475 push_rt_tasks(rq);
1278 push_rt_tasks(rq); 1476 spin_unlock_irq(&rq->lock);
1279 spin_unlock_irq(&rq->lock);
1280 }
1281} 1477}
1282 1478
1283/* 1479/*
@@ -1288,7 +1484,8 @@ static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1288{ 1484{
1289 if (!task_running(rq, p) && 1485 if (!task_running(rq, p) &&
1290 !test_tsk_need_resched(rq->curr) && 1486 !test_tsk_need_resched(rq->curr) &&
1291 rq->rt.overloaded) 1487 has_pushable_tasks(rq) &&
1488 p->rt.nr_cpus_allowed > 1)
1292 push_rt_tasks(rq); 1489 push_rt_tasks(rq);
1293} 1490}
1294 1491
@@ -1324,6 +1521,24 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1324 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) { 1521 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1325 struct rq *rq = task_rq(p); 1522 struct rq *rq = task_rq(p);
1326 1523
1524 if (!task_current(rq, p)) {
1525 /*
1526 * Make sure we dequeue this task from the pushable list
1527 * before going further. It will either remain off of
1528 * the list because we are no longer pushable, or it
1529 * will be requeued.
1530 */
1531 if (p->rt.nr_cpus_allowed > 1)
1532 dequeue_pushable_task(rq, p);
1533
1534 /*
1535 * Requeue if our weight is changing and still > 1
1536 */
1537 if (weight > 1)
1538 enqueue_pushable_task(rq, p);
1539
1540 }
1541
1327 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) { 1542 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1328 rq->rt.rt_nr_migratory++; 1543 rq->rt.rt_nr_migratory++;
1329 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) { 1544 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
@@ -1331,7 +1546,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1331 rq->rt.rt_nr_migratory--; 1546 rq->rt.rt_nr_migratory--;
1332 } 1547 }
1333 1548
1334 update_rt_migration(rq); 1549 update_rt_migration(&rq->rt);
1335 } 1550 }
1336 1551
1337 cpumask_copy(&p->cpus_allowed, new_mask); 1552 cpumask_copy(&p->cpus_allowed, new_mask);
@@ -1346,7 +1561,7 @@ static void rq_online_rt(struct rq *rq)
1346 1561
1347 __enable_runtime(rq); 1562 __enable_runtime(rq);
1348 1563
1349 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio); 1564 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1350} 1565}
1351 1566
1352/* Assumes rq->lock is held */ 1567/* Assumes rq->lock is held */
@@ -1438,7 +1653,7 @@ static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1438 * can release the rq lock and p could migrate. 1653 * can release the rq lock and p could migrate.
1439 * Only reschedule if p is still on the same runqueue. 1654 * Only reschedule if p is still on the same runqueue.
1440 */ 1655 */
1441 if (p->prio > rq->rt.highest_prio && rq->curr == p) 1656 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
1442 resched_task(p); 1657 resched_task(p);
1443#else 1658#else
1444 /* For UP simply resched on drop of prio */ 1659 /* For UP simply resched on drop of prio */
@@ -1509,6 +1724,9 @@ static void set_curr_task_rt(struct rq *rq)
1509 struct task_struct *p = rq->curr; 1724 struct task_struct *p = rq->curr;
1510 1725
1511 p->se.exec_start = rq->clock; 1726 p->se.exec_start = rq->clock;
1727
1728 /* The running task is never eligible for pushing */
1729 dequeue_pushable_task(rq, p);
1512} 1730}
1513 1731
1514static const struct sched_class rt_sched_class = { 1732static const struct sched_class rt_sched_class = {
@@ -1531,6 +1749,7 @@ static const struct sched_class rt_sched_class = {
1531 .rq_online = rq_online_rt, 1749 .rq_online = rq_online_rt,
1532 .rq_offline = rq_offline_rt, 1750 .rq_offline = rq_offline_rt,
1533 .pre_schedule = pre_schedule_rt, 1751 .pre_schedule = pre_schedule_rt,
1752 .needs_post_schedule = needs_post_schedule_rt,
1534 .post_schedule = post_schedule_rt, 1753 .post_schedule = post_schedule_rt,
1535 .task_wake_up = task_wake_up_rt, 1754 .task_wake_up = task_wake_up_rt,
1536 .switched_from = switched_from_rt, 1755 .switched_from = switched_from_rt,
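
The sched_rt.c changes above replace the coarse rq->rt.overloaded heuristic with a per-runqueue list of pushable tasks, so the post-schedule push path runs only when has_pushable_tasks() reports actual work. Below is a minimal userspace sketch of that pattern: a priority-ordered pushable list with an O(1) emptiness test. All names here (rq, task, enqueue_pushable) are illustrative stand-ins, not the kernel's plist-based implementation.

/* Sketch of the "pushable tasks" pattern: keep a priority-sorted list
 * per runqueue so the post-schedule path can test for push work in O(1)
 * instead of relying on an "overloaded" flag.  Illustrative only. */
#include <stdio.h>

struct task { int prio; struct task *next; };
struct rq { struct task *pushable; };   /* sorted, lowest prio value first */

static void enqueue_pushable(struct rq *rq, struct task *t)
{
        struct task **p = &rq->pushable;

        while (*p && (*p)->prio <= t->prio) /* keep ascending prio order */
                p = &(*p)->next;
        t->next = *p;
        *p = t;
}

static int has_pushable(const struct rq *rq)
{
        return rq->pushable != NULL;        /* the O(1) post-schedule test */
}

int main(void)
{
        struct rq rq = { NULL };
        struct task a = { 10, NULL }, b = { 5, NULL };

        enqueue_pushable(&rq, &a);
        enqueue_pushable(&rq, &b);
        printf("needs post-schedule push: %d, best prio %d\n",
               has_pushable(&rq), rq.pushable->prio);
        return 0;
}
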
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index a8f93dd374e1..32d2bd4061b0 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -4,7 +4,7 @@
4 * bump this up when changing the output format or the meaning of an existing 4 * bump this up when changing the output format or the meaning of an existing
5 * format, so that tools can adapt (or abort) 5 * format, so that tools can adapt (or abort)
6 */ 6 */
7#define SCHEDSTAT_VERSION 14 7#define SCHEDSTAT_VERSION 15
8 8
9static int show_schedstat(struct seq_file *seq, void *v) 9static int show_schedstat(struct seq_file *seq, void *v)
10{ 10{
@@ -26,9 +26,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
26 26
27 /* runqueue-specific stats */ 27 /* runqueue-specific stats */
28 seq_printf(seq, 28 seq_printf(seq,
29 "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu", 29 "cpu%d %u %u %u %u %u %u %llu %llu %lu",
30 cpu, rq->yld_both_empty, 30 cpu, rq->yld_count,
31 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
32 rq->sched_switch, rq->sched_count, rq->sched_goidle, 31 rq->sched_switch, rq->sched_count, rq->sched_goidle,
33 rq->ttwu_count, rq->ttwu_local, 32 rq->ttwu_count, rq->ttwu_local,
34 rq->rq_cpu_time, 33 rq->rq_cpu_time,
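
With SCHEDSTAT_VERSION bumped to 15, the per-cpu line of /proc/schedstat drops the three yld_*_empty counters and carries nine values after the cpu number. A hedged parsing sketch follows; the names chosen for the two trailing 64-bit fields and the final count (run_delay, pcount) are assumptions, as those seq_printf arguments come from lines outside this hunk.

/* Hedged sketch: read the version-15 per-cpu schedstat line. */
#include <stdio.h>

int main(void)
{
        char line[512];
        FILE *f = fopen("/proc/schedstat", "r");
        int ver = 0;

        if (!f)
                return 1;
        /* first line is "version N"; tools should adapt or abort on it */
        if (!fgets(line, sizeof(line), f) ||
            sscanf(line, "version %d", &ver) != 1 || ver != 15) {
                fclose(f);
                return 1;
        }
        while (fgets(line, sizeof(line), f)) {
                int cpu;
                unsigned yld, sw, cnt, idle, ttwu, local;
                unsigned long long cpu_time, run_delay;
                unsigned long pcount;

                if (sscanf(line, "cpu%d %u %u %u %u %u %u %llu %llu %lu",
                           &cpu, &yld, &sw, &cnt, &idle, &ttwu, &local,
                           &cpu_time, &run_delay, &pcount) == 10)
                        printf("cpu%d: %u yields, cpu_time %llu\n",
                               cpu, yld, cpu_time);
        }
        fclose(f);
        return 0;
}
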
diff --git a/kernel/signal.c b/kernel/signal.c
index 2a74fe87c0dd..d8034737db4c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -55,10 +55,22 @@ static int sig_handler_ignored(void __user *handler, int sig)
55 (handler == SIG_DFL && sig_kernel_ignore(sig)); 55 (handler == SIG_DFL && sig_kernel_ignore(sig));
56} 56}
57 57
58static int sig_ignored(struct task_struct *t, int sig) 58static int sig_task_ignored(struct task_struct *t, int sig,
59 int from_ancestor_ns)
59{ 60{
60 void __user *handler; 61 void __user *handler;
61 62
63 handler = sig_handler(t, sig);
64
65 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
66 handler == SIG_DFL && !from_ancestor_ns)
67 return 1;
68
69 return sig_handler_ignored(handler, sig);
70}
71
72static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
73{
62 /* 74 /*
63 * Blocked signals are never ignored, since the 75 * Blocked signals are never ignored, since the
64 * signal handler may change by the time it is 76 * signal handler may change by the time it is
@@ -67,14 +79,13 @@ static int sig_ignored(struct task_struct *t, int sig)
67 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) 79 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
68 return 0; 80 return 0;
69 81
70 handler = sig_handler(t, sig); 82 if (!sig_task_ignored(t, sig, from_ancestor_ns))
71 if (!sig_handler_ignored(handler, sig))
72 return 0; 83 return 0;
73 84
74 /* 85 /*
75 * Tracers may want to know about even ignored signals. 86 * Tracers may want to know about even ignored signals.
76 */ 87 */
77 return !tracehook_consider_ignored_signal(t, sig, handler); 88 return !tracehook_consider_ignored_signal(t, sig);
78} 89}
79 90
80/* 91/*
@@ -318,7 +329,7 @@ int unhandled_signal(struct task_struct *tsk, int sig)
318 return 1; 329 return 1;
319 if (handler != SIG_IGN && handler != SIG_DFL) 330 if (handler != SIG_IGN && handler != SIG_DFL)
320 return 0; 331 return 0;
321 return !tracehook_consider_fatal_signal(tsk, sig, handler); 332 return !tracehook_consider_fatal_signal(tsk, sig);
322} 333}
323 334
324 335
@@ -624,7 +635,7 @@ static int check_kill_permission(int sig, struct siginfo *info,
624 * Returns true if the signal should be actually delivered, otherwise 635 * Returns true if the signal should be actually delivered, otherwise
625 * it should be dropped. 636 * it should be dropped.
626 */ 637 */
627static int prepare_signal(int sig, struct task_struct *p) 638static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
628{ 639{
629 struct signal_struct *signal = p->signal; 640 struct signal_struct *signal = p->signal;
630 struct task_struct *t; 641 struct task_struct *t;
@@ -708,7 +719,7 @@ static int prepare_signal(int sig, struct task_struct *p)
708 } 719 }
709 } 720 }
710 721
711 return !sig_ignored(p, sig); 722 return !sig_ignored(p, sig, from_ancestor_ns);
712} 723}
713 724
714/* 725/*
@@ -777,7 +788,7 @@ static void complete_signal(int sig, struct task_struct *p, int group)
777 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && 788 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
778 !sigismember(&t->real_blocked, sig) && 789 !sigismember(&t->real_blocked, sig) &&
779 (sig == SIGKILL || 790 (sig == SIGKILL ||
780 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) { 791 !tracehook_consider_fatal_signal(t, sig))) {
781 /* 792 /*
782 * This signal will be fatal to the whole group. 793 * This signal will be fatal to the whole group.
783 */ 794 */
@@ -813,8 +824,8 @@ static inline int legacy_queue(struct sigpending *signals, int sig)
813 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); 824 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
814} 825}
815 826
816static int send_signal(int sig, struct siginfo *info, struct task_struct *t, 827static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
817 int group) 828 int group, int from_ancestor_ns)
818{ 829{
819 struct sigpending *pending; 830 struct sigpending *pending;
820 struct sigqueue *q; 831 struct sigqueue *q;
@@ -822,7 +833,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
822 trace_sched_signal_send(sig, t); 833 trace_sched_signal_send(sig, t);
823 834
824 assert_spin_locked(&t->sighand->siglock); 835 assert_spin_locked(&t->sighand->siglock);
825 if (!prepare_signal(sig, t)) 836
837 if (!prepare_signal(sig, t, from_ancestor_ns))
826 return 0; 838 return 0;
827 839
828 pending = group ? &t->signal->shared_pending : &t->pending; 840 pending = group ? &t->signal->shared_pending : &t->pending;
@@ -871,6 +883,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
871 break; 883 break;
872 default: 884 default:
873 copy_siginfo(&q->info, info); 885 copy_siginfo(&q->info, info);
886 if (from_ancestor_ns)
887 q->info.si_pid = 0;
874 break; 888 break;
875 } 889 }
876 } else if (!is_si_special(info)) { 890 } else if (!is_si_special(info)) {
@@ -889,6 +903,20 @@ out_set:
889 return 0; 903 return 0;
890} 904}
891 905
906static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
907 int group)
908{
909 int from_ancestor_ns = 0;
910
911#ifdef CONFIG_PID_NS
912 if (!is_si_special(info) && SI_FROMUSER(info) &&
913 task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
914 from_ancestor_ns = 1;
915#endif
916
917 return __send_signal(sig, info, t, group, from_ancestor_ns);
918}
919
892int print_fatal_signals; 920int print_fatal_signals;
893 921
894static void print_fatal_signal(struct pt_regs *regs, int signr) 922static void print_fatal_signal(struct pt_regs *regs, int signr)
@@ -1133,7 +1161,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1133 if (sig && p->sighand) { 1161 if (sig && p->sighand) {
1134 unsigned long flags; 1162 unsigned long flags;
1135 spin_lock_irqsave(&p->sighand->siglock, flags); 1163 spin_lock_irqsave(&p->sighand->siglock, flags);
1136 ret = __group_send_sig_info(sig, info, p); 1164 ret = __send_signal(sig, info, p, 1, 0);
1137 spin_unlock_irqrestore(&p->sighand->siglock, flags); 1165 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1138 } 1166 }
1139out_unlock: 1167out_unlock:
@@ -1320,7 +1348,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1320 goto ret; 1348 goto ret;
1321 1349
1322 ret = 1; /* the signal is ignored */ 1350 ret = 1; /* the signal is ignored */
1323 if (!prepare_signal(sig, t)) 1351 if (!prepare_signal(sig, t, 0))
1324 goto out; 1352 goto out;
1325 1353
1326 ret = 0; 1354 ret = 0;
@@ -1575,7 +1603,15 @@ static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1575 read_lock(&tasklist_lock); 1603 read_lock(&tasklist_lock);
1576 if (may_ptrace_stop()) { 1604 if (may_ptrace_stop()) {
1577 do_notify_parent_cldstop(current, CLD_TRAPPED); 1605 do_notify_parent_cldstop(current, CLD_TRAPPED);
1606 /*
1607 * Don't want to allow preemption here, because
1608 * sys_ptrace() needs this task to be inactive.
1609 *
1610 * XXX: implement read_unlock_no_resched().
1611 */
1612 preempt_disable();
1578 read_unlock(&tasklist_lock); 1613 read_unlock(&tasklist_lock);
1614 preempt_enable_no_resched();
1579 schedule(); 1615 schedule();
1580 } else { 1616 } else {
1581 /* 1617 /*
@@ -1836,9 +1872,16 @@ relock:
1836 1872
1837 /* 1873 /*
1838 * Global init gets no signals it doesn't want. 1874 * Global init gets no signals it doesn't want.
1875 * Container-init gets no signals it doesn't want from the same
1876 * container.
1877 *
1878 * Note that if global/container-init sees a sig_kernel_only()
1879 * signal here, the signal must have been generated internally
1880 * or must have come from an ancestor namespace. In either
1881 * case, the signal cannot be dropped.
1839 */ 1882 */
1840 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && 1883 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1841 !signal_group_exit(signal)) 1884 !sig_kernel_only(signr))
1842 continue; 1885 continue;
1843 1886
1844 if (sig_kernel_stop(signr)) { 1887 if (sig_kernel_stop(signr)) {
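
Taken together, the signal.c hunks mean a container-init (SIGNAL_UNKILLABLE) silently ignores default-disposition signals raised from inside its own pid namespace, while SIGKILL/SIGSTOP sent from an ancestor namespace cannot be dropped. A hedged userspace demonstration of those semantics is sketched below (assumes CONFIG_PID_NS and needs root for CLONE_NEWPID):

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

static char stack[64 * 1024];

static int ns_init(void *arg)
{
        /* pid 1 in the new namespace; with no handlers installed, any
         * default-disposition signal sent from *inside* the namespace
         * is ignored per sig_task_ignored() above */
        for (;;)
                pause();
        return 0;
}

int main(void)
{
        pid_t pid = clone(ns_init, stack + sizeof(stack),
                          CLONE_NEWPID | SIGCHLD, NULL);

        if (pid < 0) {
                perror("clone");
                return 1;
        }
        sleep(1);
        kill(pid, SIGKILL);   /* from the ancestor ns: cannot be dropped */
        waitpid(pid, NULL, 0);
        printf("container-init reaped after SIGKILL from ancestor ns\n");
        return 0;
}
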
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
new file mode 100644
index 000000000000..cf2bc01186ef
--- /dev/null
+++ b/kernel/slow-work.c
@@ -0,0 +1,640 @@
1/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
2 *
3 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 *
11 * See Documentation/slow-work.txt
12 */
13
14#include <linux/module.h>
15#include <linux/slow-work.h>
16#include <linux/kthread.h>
17#include <linux/freezer.h>
18#include <linux/wait.h>
19
20#define SLOW_WORK_CULL_TIMEOUT (5 * HZ) /* cull threads 5s after running out of
21 * things to do */
22#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
23 * OOM */
24
25static void slow_work_cull_timeout(unsigned long);
26static void slow_work_oom_timeout(unsigned long);
27
28#ifdef CONFIG_SYSCTL
29static int slow_work_min_threads_sysctl(struct ctl_table *, int, struct file *,
30 void __user *, size_t *, loff_t *);
31
32static int slow_work_max_threads_sysctl(struct ctl_table *, int , struct file *,
33 void __user *, size_t *, loff_t *);
34#endif
35
36/*
37 * The pool of threads has at least min threads in it as long as someone is
38 * using the facility, and may have as many as max.
39 *
40 * A portion of the pool may be processing very slow operations.
41 */
42static unsigned slow_work_min_threads = 2;
43static unsigned slow_work_max_threads = 4;
44static unsigned vslow_work_proportion = 50; /* % of threads that may process
45 * very slow work */
46
47#ifdef CONFIG_SYSCTL
48static const int slow_work_min_min_threads = 2;
49static int slow_work_max_max_threads = 255;
50static const int slow_work_min_vslow = 1;
51static const int slow_work_max_vslow = 99;
52
53ctl_table slow_work_sysctls[] = {
54 {
55 .ctl_name = CTL_UNNUMBERED,
56 .procname = "min-threads",
57 .data = &slow_work_min_threads,
58 .maxlen = sizeof(unsigned),
59 .mode = 0644,
60 .proc_handler = slow_work_min_threads_sysctl,
61 .extra1 = (void *) &slow_work_min_min_threads,
62 .extra2 = &slow_work_max_threads,
63 },
64 {
65 .ctl_name = CTL_UNNUMBERED,
66 .procname = "max-threads",
67 .data = &slow_work_max_threads,
68 .maxlen = sizeof(unsigned),
69 .mode = 0644,
70 .proc_handler = slow_work_max_threads_sysctl,
71 .extra1 = &slow_work_min_threads,
72 .extra2 = (void *) &slow_work_max_max_threads,
73 },
74 {
75 .ctl_name = CTL_UNNUMBERED,
76 .procname = "vslow-percentage",
77 .data = &vslow_work_proportion,
78 .maxlen = sizeof(unsigned),
79 .mode = 0644,
80 .proc_handler = &proc_dointvec_minmax,
81 .extra1 = (void *) &slow_work_min_vslow,
82 .extra2 = (void *) &slow_work_max_vslow,
83 },
84 { .ctl_name = 0 }
85};
86#endif
87
88/*
89 * The active state of the thread pool
90 */
91static atomic_t slow_work_thread_count;
92static atomic_t vslow_work_executing_count;
93
94static bool slow_work_may_not_start_new_thread;
95static bool slow_work_cull; /* cull a thread due to lack of activity */
96static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
97static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
98static struct slow_work slow_work_new_thread; /* new thread starter */
99
100/*
101 * The queues of work items and the lock governing access to them. These are
102 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
103 * as the number of threads bears no relation to the number of CPUs.
104 *
105 * There are two queues of work items: one for slow work items, and one for
106 * very slow work items.
107 */
108static LIST_HEAD(slow_work_queue);
109static LIST_HEAD(vslow_work_queue);
110static DEFINE_SPINLOCK(slow_work_queue_lock);
111
112/*
113 * The thread controls. A variable used to signal to the threads that they
114 * should exit when the queue is empty, a waitqueue used by the threads to wait
115 * for signals, and a completion set by the last thread to exit.
116 */
117static bool slow_work_threads_should_exit;
118static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
119static DECLARE_COMPLETION(slow_work_last_thread_exited);
120
121/*
122 * The number of users of the thread pool and its lock. Whilst this is zero we
123 * have no threads hanging around, and when this reaches zero, we wait for all
124 * active or queued work items to complete and kill all the threads we do have.
125 */
126static int slow_work_user_count;
127static DEFINE_MUTEX(slow_work_user_lock);
128
129/*
130 * Calculate the maximum number of active threads in the pool that are
131 * permitted to process very slow work items.
132 *
133 * The answer is rounded up to at least 1, but may not equal or exceed the
134 * maximum number of the threads in the pool. This means we always have at
135 * least one thread that can process slow work items, and we always have at
136 * least one thread that won't get tied up doing so.
137 */
138static unsigned slow_work_calc_vsmax(void)
139{
140 unsigned vsmax;
141
142 vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
143 vsmax /= 100;
144 vsmax = max(vsmax, 1U);
145 return min(vsmax, slow_work_max_threads - 1);
146}
147
148/*
149 * Attempt to execute stuff queued on a slow thread. Return true if we managed
150 * it, false if there was nothing to do.
151 */
152static bool slow_work_execute(void)
153{
154 struct slow_work *work = NULL;
155 unsigned vsmax;
156 bool very_slow;
157
158 vsmax = slow_work_calc_vsmax();
159
160 /* see if we can schedule a new thread to be started if we're not
161 * keeping up with the work */
162 if (!waitqueue_active(&slow_work_thread_wq) &&
163 (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
164 atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
165 !slow_work_may_not_start_new_thread)
166 slow_work_enqueue(&slow_work_new_thread);
167
168 /* find something to execute */
169 spin_lock_irq(&slow_work_queue_lock);
170 if (!list_empty(&vslow_work_queue) &&
171 atomic_read(&vslow_work_executing_count) < vsmax) {
172 work = list_entry(vslow_work_queue.next,
173 struct slow_work, link);
174 if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
175 BUG();
176 list_del_init(&work->link);
177 atomic_inc(&vslow_work_executing_count);
178 very_slow = true;
179 } else if (!list_empty(&slow_work_queue)) {
180 work = list_entry(slow_work_queue.next,
181 struct slow_work, link);
182 if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
183 BUG();
184 list_del_init(&work->link);
185 very_slow = false;
186 } else {
187 very_slow = false; /* avoid the compiler warning */
188 }
189 spin_unlock_irq(&slow_work_queue_lock);
190
191 if (!work)
192 return false;
193
194 if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
195 BUG();
196
197 work->ops->execute(work);
198
199 if (very_slow)
200 atomic_dec(&vslow_work_executing_count);
201 clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
202
203 /* if someone tried to enqueue the item whilst we were executing it,
204 * then it'll be left unenqueued to avoid multiple threads trying to
205 * execute it simultaneously
206 *
207 * there is, however, a race between us testing the pending flag and
208 * getting the spinlock, and between the enqueuer setting the pending
209 * flag and getting the spinlock, so we use a deferral bit to tell us
210 * if the enqueuer got there first
211 */
212 if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
213 spin_lock_irq(&slow_work_queue_lock);
214
215 if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
216 test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
217 goto auto_requeue;
218
219 spin_unlock_irq(&slow_work_queue_lock);
220 }
221
222 work->ops->put_ref(work);
223 return true;
224
225auto_requeue:
226 /* we must complete the enqueue operation
227 * - we transfer our ref on the item back to the appropriate queue
228 * - don't wake another thread up as we're awake already
229 */
230 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
231 list_add_tail(&work->link, &vslow_work_queue);
232 else
233 list_add_tail(&work->link, &slow_work_queue);
234 spin_unlock_irq(&slow_work_queue_lock);
235 return true;
236}
237
238/**
239 * slow_work_enqueue - Schedule a slow work item for processing
240 * @work: The work item to queue
241 *
242 * Schedule a slow work item for processing. If the item is already undergoing
243 * execution, this guarantees not to re-enter the execution routine until the
244 * first execution finishes.
245 *
246 * The item is pinned by this function as it retains a reference to it, managed
247 * through the item operations. The item is unpinned once it has been
248 * executed.
249 *
250 * An item may hog the thread that is running it for a relatively large amount
251 * of time, sufficient, for example, to perform several lookup, mkdir, create
252 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
253 *
254 * Conversely, if a number of items are awaiting processing, it may take some
255 * time before any given item is given attention. The number of threads in the
256 * pool may be increased to deal with demand, but only up to a limit.
257 *
258 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
259 * the very slow queue, from which only a portion of the threads will be
260 * allowed to pick items to execute. This ensures that very slow items won't
261 * overly block ones that are just ordinarily slow.
262 *
263 * Returns 0 if successful, -EAGAIN if not.
264 */
265int slow_work_enqueue(struct slow_work *work)
266{
267 unsigned long flags;
268
269 BUG_ON(slow_work_user_count <= 0);
270 BUG_ON(!work);
271 BUG_ON(!work->ops);
272 BUG_ON(!work->ops->get_ref);
273
274 /* when honouring an enqueue request, we only promise that we will run
275 * the work function in the future; we do not promise to run it once
276 * per enqueue request
277 *
278 * we use the PENDING bit to merge together repeat requests without
279 * having to disable IRQs and take the spinlock, whilst still
280 * maintaining our promise
281 */
282 if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
283 spin_lock_irqsave(&slow_work_queue_lock, flags);
284
285 /* we promise that we will not attempt to execute the work
286 * function in more than one thread simultaneously
287 *
288 * this, however, leaves us with a problem if we're asked to
289 * enqueue the work whilst someone is executing the work
290 * function as simply queueing the work immediately means that
291 * another thread may try executing it whilst it is already
292 * under execution
293 *
294 * to deal with this, we set the ENQ_DEFERRED bit instead of
295 * enqueueing, and the thread currently executing the work
296 * function will enqueue the work item when the work function
297 * returns and it has cleared the EXECUTING bit
298 */
299 if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
300 set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
301 } else {
302 if (work->ops->get_ref(work) < 0)
303 goto cant_get_ref;
304 if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
305 list_add_tail(&work->link, &vslow_work_queue);
306 else
307 list_add_tail(&work->link, &slow_work_queue);
308 wake_up(&slow_work_thread_wq);
309 }
310
311 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
312 }
313 return 0;
314
315cant_get_ref:
316 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
317 return -EAGAIN;
318}
319EXPORT_SYMBOL(slow_work_enqueue);
320
321/*
322 * Worker thread culling algorithm
323 */
324static bool slow_work_cull_thread(void)
325{
326 unsigned long flags;
327 bool do_cull = false;
328
329 spin_lock_irqsave(&slow_work_queue_lock, flags);
330
331 if (slow_work_cull) {
332 slow_work_cull = false;
333
334 if (list_empty(&slow_work_queue) &&
335 list_empty(&vslow_work_queue) &&
336 atomic_read(&slow_work_thread_count) >
337 slow_work_min_threads) {
338 mod_timer(&slow_work_cull_timer,
339 jiffies + SLOW_WORK_CULL_TIMEOUT);
340 do_cull = true;
341 }
342 }
343
344 spin_unlock_irqrestore(&slow_work_queue_lock, flags);
345 return do_cull;
346}
347
348/*
349 * Determine if there is slow work available for dispatch
350 */
351static inline bool slow_work_available(int vsmax)
352{
353 return !list_empty(&slow_work_queue) ||
354 (!list_empty(&vslow_work_queue) &&
355 atomic_read(&vslow_work_executing_count) < vsmax);
356}
357
358/*
359 * Worker thread dispatcher
360 */
361static int slow_work_thread(void *_data)
362{
363 int vsmax;
364
365 DEFINE_WAIT(wait);
366
367 set_freezable();
368 set_user_nice(current, -5);
369
370 for (;;) {
371 vsmax = vslow_work_proportion;
372 vsmax *= atomic_read(&slow_work_thread_count);
373 vsmax /= 100;
374
375 prepare_to_wait(&slow_work_thread_wq, &wait,
376 TASK_INTERRUPTIBLE);
377 if (!freezing(current) &&
378 !slow_work_threads_should_exit &&
379 !slow_work_available(vsmax) &&
380 !slow_work_cull)
381 schedule();
382 finish_wait(&slow_work_thread_wq, &wait);
383
384 try_to_freeze();
385
386 vsmax = vslow_work_proportion;
387 vsmax *= atomic_read(&slow_work_thread_count);
388 vsmax /= 100;
389
390 if (slow_work_available(vsmax) && slow_work_execute()) {
391 cond_resched();
392 if (list_empty(&slow_work_queue) &&
393 list_empty(&vslow_work_queue) &&
394 atomic_read(&slow_work_thread_count) >
395 slow_work_min_threads)
396 mod_timer(&slow_work_cull_timer,
397 jiffies + SLOW_WORK_CULL_TIMEOUT);
398 continue;
399 }
400
401 if (slow_work_threads_should_exit)
402 break;
403
404 if (slow_work_cull && slow_work_cull_thread())
405 break;
406 }
407
408 if (atomic_dec_and_test(&slow_work_thread_count))
409 complete_and_exit(&slow_work_last_thread_exited, 0);
410 return 0;
411}
412
413/*
414 * Handle thread cull timer expiration
415 */
416static void slow_work_cull_timeout(unsigned long data)
417{
418 slow_work_cull = true;
419 wake_up(&slow_work_thread_wq);
420}
421
422/*
423 * Get a reference on slow work thread starter
424 */
425static int slow_work_new_thread_get_ref(struct slow_work *work)
426{
427 return 0;
428}
429
430/*
431 * Drop a reference on slow work thread starter
432 */
433static void slow_work_new_thread_put_ref(struct slow_work *work)
434{
435}
436
437/*
438 * Start a new slow work thread
439 */
440static void slow_work_new_thread_execute(struct slow_work *work)
441{
442 struct task_struct *p;
443
444 if (slow_work_threads_should_exit)
445 return;
446
447 if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
448 return;
449
450 if (!mutex_trylock(&slow_work_user_lock))
451 return;
452
453 slow_work_may_not_start_new_thread = true;
454 atomic_inc(&slow_work_thread_count);
455 p = kthread_run(slow_work_thread, NULL, "kslowd");
456 if (IS_ERR(p)) {
457 printk(KERN_DEBUG "Slow work thread pool: OOM\n");
458 if (atomic_dec_and_test(&slow_work_thread_count))
459 BUG(); /* we're running on a slow work thread... */
460 mod_timer(&slow_work_oom_timer,
461 jiffies + SLOW_WORK_OOM_TIMEOUT);
462 } else {
463 /* ratelimit the starting of new threads */
464 mod_timer(&slow_work_oom_timer, jiffies + 1);
465 }
466
467 mutex_unlock(&slow_work_user_lock);
468}
469
470static const struct slow_work_ops slow_work_new_thread_ops = {
471 .get_ref = slow_work_new_thread_get_ref,
472 .put_ref = slow_work_new_thread_put_ref,
473 .execute = slow_work_new_thread_execute,
474};
475
476/*
477 * post-OOM new thread start suppression expiration
478 */
479static void slow_work_oom_timeout(unsigned long data)
480{
481 slow_work_may_not_start_new_thread = false;
482}
483
484#ifdef CONFIG_SYSCTL
485/*
486 * Handle adjustment of the minimum number of threads
487 */
488static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
489 struct file *filp, void __user *buffer,
490 size_t *lenp, loff_t *ppos)
491{
492 int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
493 int n;
494
495 if (ret == 0) {
496 mutex_lock(&slow_work_user_lock);
497 if (slow_work_user_count > 0) {
498 /* see if we need to start or stop threads */
499 n = atomic_read(&slow_work_thread_count) -
500 slow_work_min_threads;
501
502 if (n < 0 && !slow_work_may_not_start_new_thread)
503 slow_work_enqueue(&slow_work_new_thread);
504 else if (n > 0)
505 mod_timer(&slow_work_cull_timer,
506 jiffies + SLOW_WORK_CULL_TIMEOUT);
507 }
508 mutex_unlock(&slow_work_user_lock);
509 }
510
511 return ret;
512}
513
514/*
515 * Handle adjustment of the maximum number of threads
516 */
517static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
518 struct file *filp, void __user *buffer,
519 size_t *lenp, loff_t *ppos)
520{
521 int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
522 int n;
523
524 if (ret == 0) {
525 mutex_lock(&slow_work_user_lock);
526 if (slow_work_user_count > 0) {
527 /* see if we need to stop threads */
528 n = slow_work_max_threads -
529 atomic_read(&slow_work_thread_count);
530
531 if (n < 0)
532 mod_timer(&slow_work_cull_timer,
533 jiffies + SLOW_WORK_CULL_TIMEOUT);
534 }
535 mutex_unlock(&slow_work_user_lock);
536 }
537
538 return ret;
539}
540#endif /* CONFIG_SYSCTL */
541
542/**
543 * slow_work_register_user - Register a user of the facility
544 *
545 * Register a user of the facility, starting up the initial threads if there
546 * aren't any other users at this point. This will return 0 if successful, or
547 * an error if not.
548 */
549int slow_work_register_user(void)
550{
551 struct task_struct *p;
552 int loop;
553
554 mutex_lock(&slow_work_user_lock);
555
556 if (slow_work_user_count == 0) {
557 printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
558 init_completion(&slow_work_last_thread_exited);
559
560 slow_work_threads_should_exit = false;
561 slow_work_init(&slow_work_new_thread,
562 &slow_work_new_thread_ops);
563 slow_work_may_not_start_new_thread = false;
564 slow_work_cull = false;
565
566 /* start the minimum number of threads */
567 for (loop = 0; loop < slow_work_min_threads; loop++) {
568 atomic_inc(&slow_work_thread_count);
569 p = kthread_run(slow_work_thread, NULL, "kslowd");
570 if (IS_ERR(p))
571 goto error;
572 }
573 printk(KERN_NOTICE "Slow work thread pool: Ready\n");
574 }
575
576 slow_work_user_count++;
577 mutex_unlock(&slow_work_user_lock);
578 return 0;
579
580error:
581 if (atomic_dec_and_test(&slow_work_thread_count))
582 complete(&slow_work_last_thread_exited);
583 if (loop > 0) {
584 printk(KERN_ERR "Slow work thread pool:"
585 " Aborting startup on ENOMEM\n");
586 slow_work_threads_should_exit = true;
587 wake_up_all(&slow_work_thread_wq);
588 wait_for_completion(&slow_work_last_thread_exited);
589 printk(KERN_ERR "Slow work thread pool: Aborted\n");
590 }
591 mutex_unlock(&slow_work_user_lock);
592 return PTR_ERR(p);
593}
594EXPORT_SYMBOL(slow_work_register_user);
595
596/**
597 * slow_work_unregister_user - Unregister a user of the facility
598 *
599 * Unregister a user of the facility, killing all the threads if this was the
600 * last one.
601 */
602void slow_work_unregister_user(void)
603{
604 mutex_lock(&slow_work_user_lock);
605
606 BUG_ON(slow_work_user_count <= 0);
607
608 slow_work_user_count--;
609 if (slow_work_user_count == 0) {
610 printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
611 slow_work_threads_should_exit = true;
612 wake_up_all(&slow_work_thread_wq);
613 wait_for_completion(&slow_work_last_thread_exited);
614 printk(KERN_NOTICE "Slow work thread pool:"
615 " Shut down complete\n");
616 }
617
618 del_timer_sync(&slow_work_cull_timer);
619
620 mutex_unlock(&slow_work_user_lock);
621}
622EXPORT_SYMBOL(slow_work_unregister_user);
623
624/*
625 * Initialise the slow work facility
626 */
627static int __init init_slow_work(void)
628{
629 unsigned nr_cpus = num_possible_cpus();
630
631 if (slow_work_max_threads < nr_cpus)
632 slow_work_max_threads = nr_cpus;
633#ifdef CONFIG_SYSCTL
634 if (slow_work_max_max_threads < nr_cpus * 2)
635 slow_work_max_max_threads = nr_cpus * 2;
636#endif
637 return 0;
638}
639
640subsys_initcall(init_slow_work);
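
For context, here is a hedged sketch of what a client of the new facility looks like, modelled on the calls this file itself makes (slow_work_register_user(), slow_work_init(), slow_work_enqueue()). The declarations live in linux/slow-work.h, which is not part of this diff, so the exact ops signatures are inferred from their use above.

#include <linux/module.h>
#include <linux/slow-work.h>

static struct slow_work my_work;

static int my_get_ref(struct slow_work *work)
{
        return 0;               /* pin the containing object here */
}

static void my_put_ref(struct slow_work *work)
{
        /* drop the reference taken in my_get_ref() */
}

static void my_execute(struct slow_work *work)
{
        /* may sleep, do I/O and take mutexes - that is the point */
        printk(KERN_INFO "slow work item ran\n");
}

static const struct slow_work_ops my_ops = {
        .get_ref = my_get_ref,
        .put_ref = my_put_ref,
        .execute = my_execute,
};

static int __init my_init(void)
{
        int ret = slow_work_register_user();

        if (ret < 0)
                return ret;
        slow_work_init(&my_work, &my_ops);
        return slow_work_enqueue(&my_work);
}

static void __exit my_exit(void)
{
        slow_work_unregister_user();
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
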
diff --git a/kernel/smp.c b/kernel/smp.c
index bbedbb7efe32..858baac568ee 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -2,40 +2,82 @@
2 * Generic helpers for smp ipi calls 2 * Generic helpers for smp ipi calls
3 * 3 *
4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008 4 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
5 *
6 */ 5 */
7#include <linux/init.h>
8#include <linux/module.h>
9#include <linux/percpu.h>
10#include <linux/rcupdate.h> 6#include <linux/rcupdate.h>
11#include <linux/rculist.h> 7#include <linux/rculist.h>
8#include <linux/kernel.h>
9#include <linux/module.h>
10#include <linux/percpu.h>
11#include <linux/init.h>
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <linux/cpu.h>
13 14
14static DEFINE_PER_CPU(struct call_single_queue, call_single_queue); 15static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
15static LIST_HEAD(call_function_queue); 16
16__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock); 17static struct {
18 struct list_head queue;
19 spinlock_t lock;
20} call_function __cacheline_aligned_in_smp =
21 {
22 .queue = LIST_HEAD_INIT(call_function.queue),
23 .lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
24 };
17 25
18enum { 26enum {
19 CSD_FLAG_WAIT = 0x01, 27 CSD_FLAG_LOCK = 0x01,
20 CSD_FLAG_ALLOC = 0x02,
21 CSD_FLAG_LOCK = 0x04,
22}; 28};
23 29
24struct call_function_data { 30struct call_function_data {
25 struct call_single_data csd; 31 struct call_single_data csd;
26 spinlock_t lock; 32 spinlock_t lock;
27 unsigned int refs; 33 unsigned int refs;
28 struct rcu_head rcu_head; 34 cpumask_var_t cpumask;
29 unsigned long cpumask_bits[];
30}; 35};
31 36
32struct call_single_queue { 37struct call_single_queue {
33 struct list_head list; 38 struct list_head list;
34 spinlock_t lock; 39 spinlock_t lock;
40};
41
42static DEFINE_PER_CPU(struct call_function_data, cfd_data) = {
43 .lock = __SPIN_LOCK_UNLOCKED(cfd_data.lock),
44};
45
46static int
47hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
48{
49 long cpu = (long)hcpu;
50 struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
51
52 switch (action) {
53 case CPU_UP_PREPARE:
54 case CPU_UP_PREPARE_FROZEN:
55 if (!alloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
56 cpu_to_node(cpu)))
57 return NOTIFY_BAD;
58 break;
59
60#ifdef CONFIG_CPU_HOTPLUG
61 case CPU_UP_CANCELED:
62 case CPU_UP_CANCELED_FROZEN:
63
64 case CPU_DEAD:
65 case CPU_DEAD_FROZEN:
66 free_cpumask_var(cfd->cpumask);
67 break;
68#endif
69 };
70
71 return NOTIFY_OK;
72}
73
74static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
75 .notifier_call = hotplug_cfd,
35}; 76};
36 77
37static int __cpuinit init_call_single_data(void) 78static int __cpuinit init_call_single_data(void)
38{ 79{
80 void *cpu = (void *)(long)smp_processor_id();
39 int i; 81 int i;
40 82
41 for_each_possible_cpu(i) { 83 for_each_possible_cpu(i) {
@@ -44,29 +86,63 @@ static int __cpuinit init_call_single_data(void)
44 spin_lock_init(&q->lock); 86 spin_lock_init(&q->lock);
45 INIT_LIST_HEAD(&q->list); 87 INIT_LIST_HEAD(&q->list);
46 } 88 }
89
90 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
91 register_cpu_notifier(&hotplug_cfd_notifier);
92
47 return 0; 93 return 0;
48} 94}
49early_initcall(init_call_single_data); 95early_initcall(init_call_single_data);
50 96
51static void csd_flag_wait(struct call_single_data *data) 97/*
98 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
99 *
100 * For non-synchronous ipi calls the csd can still be in use by the
101 * previous function call. For multi-cpu calls it's even more interesting
102 * as we'll have to ensure no other cpu is observing our csd.
103 */
104static void csd_lock_wait(struct call_single_data *data)
52{ 105{
53 /* Wait for response */ 106 while (data->flags & CSD_FLAG_LOCK)
54 do {
55 if (!(data->flags & CSD_FLAG_WAIT))
56 break;
57 cpu_relax(); 107 cpu_relax();
58 } while (1); 108}
109
110static void csd_lock(struct call_single_data *data)
111{
112 csd_lock_wait(data);
113 data->flags = CSD_FLAG_LOCK;
114
115 /*
116 * prevent CPU from reordering the above assignment
117 * to ->flags with any subsequent assignments to other
118 * fields of the specified call_single_data structure:
119 */
120 smp_mb();
121}
122
123static void csd_unlock(struct call_single_data *data)
124{
125 WARN_ON(!(data->flags & CSD_FLAG_LOCK));
126
127 /*
128 * ensure we're all done before releasing data:
129 */
130 smp_mb();
131
132 data->flags &= ~CSD_FLAG_LOCK;
59} 133}
60 134
61/* 135/*
62 * Insert a previously allocated call_single_data element for execution 136 * Insert a previously allocated call_single_data element
63 * on the given CPU. data must already have ->func, ->info, and ->flags set. 137 * for execution on the given CPU. data must already have
138 * ->func, ->info, and ->flags set.
64 */ 139 */
65static void generic_exec_single(int cpu, struct call_single_data *data) 140static
141void generic_exec_single(int cpu, struct call_single_data *data, int wait)
66{ 142{
67 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); 143 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
68 int wait = data->flags & CSD_FLAG_WAIT, ipi;
69 unsigned long flags; 144 unsigned long flags;
145 int ipi;
70 146
71 spin_lock_irqsave(&dst->lock, flags); 147 spin_lock_irqsave(&dst->lock, flags);
72 ipi = list_empty(&dst->list); 148 ipi = list_empty(&dst->list);
@@ -74,24 +150,21 @@ static void generic_exec_single(int cpu, struct call_single_data *data)
74 spin_unlock_irqrestore(&dst->lock, flags); 150 spin_unlock_irqrestore(&dst->lock, flags);
75 151
76 /* 152 /*
77 * Make the list addition visible before sending the ipi. 153 * The list addition should be visible before sending the IPI
154 * handler locks the list to pull the entry off it because of
155 * normal cache coherency rules implied by spinlocks.
156 *
157 * If IPIs can go out of order with respect to the cache coherency protocol
158 * in an architecture, sufficient synchronisation should be added
159 * to arch code to make it appear to obey cache coherency WRT
160 * locking and barrier primitives. Generic code isn't really
161 * equipped to do the right thing...
78 */ 162 */
79 smp_mb();
80
81 if (ipi) 163 if (ipi)
82 arch_send_call_function_single_ipi(cpu); 164 arch_send_call_function_single_ipi(cpu);
83 165
84 if (wait) 166 if (wait)
85 csd_flag_wait(data); 167 csd_lock_wait(data);
86}
87
88static void rcu_free_call_data(struct rcu_head *head)
89{
90 struct call_function_data *data;
91
92 data = container_of(head, struct call_function_data, rcu_head);
93
94 kfree(data);
95} 168}
96 169
97/* 170/*
@@ -104,99 +177,83 @@ void generic_smp_call_function_interrupt(void)
104 int cpu = get_cpu(); 177 int cpu = get_cpu();
105 178
106 /* 179 /*
107 * It's ok to use list_for_each_rcu() here even though we may delete 180 * Ensure entry is visible on call_function_queue after we have
108 * 'pos', since list_del_rcu() doesn't clear ->next 181 * entered the IPI. See comment in smp_call_function_many.
182 * If we don't have this, then we may miss an entry on the list
183 * and never get another IPI to process it.
184 */
185 smp_mb();
186
187 /*
188 * It's ok to use list_for_each_rcu() here even though we may
189 * delete 'pos', since list_del_rcu() doesn't clear ->next
109 */ 190 */
110 rcu_read_lock(); 191 list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
111 list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
112 int refs; 192 int refs;
113 193
114 if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits))) 194 spin_lock(&data->lock);
195 if (!cpumask_test_cpu(cpu, data->cpumask)) {
196 spin_unlock(&data->lock);
115 continue; 197 continue;
198 }
199 cpumask_clear_cpu(cpu, data->cpumask);
200 spin_unlock(&data->lock);
116 201
117 data->csd.func(data->csd.info); 202 data->csd.func(data->csd.info);
118 203
119 spin_lock(&data->lock); 204 spin_lock(&data->lock);
120 cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
121 WARN_ON(data->refs == 0); 205 WARN_ON(data->refs == 0);
122 data->refs--; 206 refs = --data->refs;
123 refs = data->refs; 207 if (!refs) {
208 spin_lock(&call_function.lock);
209 list_del_rcu(&data->csd.list);
210 spin_unlock(&call_function.lock);
211 }
124 spin_unlock(&data->lock); 212 spin_unlock(&data->lock);
125 213
126 if (refs) 214 if (refs)
127 continue; 215 continue;
128 216
129 spin_lock(&call_function_lock); 217 csd_unlock(&data->csd);
130 list_del_rcu(&data->csd.list);
131 spin_unlock(&call_function_lock);
132
133 if (data->csd.flags & CSD_FLAG_WAIT) {
134 /*
135 * serialize stores to data with the flag clear
136 * and wakeup
137 */
138 smp_wmb();
139 data->csd.flags &= ~CSD_FLAG_WAIT;
140 }
141 if (data->csd.flags & CSD_FLAG_ALLOC)
142 call_rcu(&data->rcu_head, rcu_free_call_data);
143 } 218 }
144 rcu_read_unlock();
145 219
146 put_cpu(); 220 put_cpu();
147} 221}
148 222
149/* 223/*
150 * Invoked by arch to handle an IPI for call function single. Must be called 224 * Invoked by arch to handle an IPI for call function single. Must be
151 * from the arch with interrupts disabled. 225 * called from the arch with interrupts disabled.
152 */ 226 */
153void generic_smp_call_function_single_interrupt(void) 227void generic_smp_call_function_single_interrupt(void)
154{ 228{
155 struct call_single_queue *q = &__get_cpu_var(call_single_queue); 229 struct call_single_queue *q = &__get_cpu_var(call_single_queue);
230 unsigned int data_flags;
156 LIST_HEAD(list); 231 LIST_HEAD(list);
157 232
158 /* 233 spin_lock(&q->lock);
159 * Need to see other stores to list head for checking whether 234 list_replace_init(&q->list, &list);
160 * list is empty without holding q->lock 235 spin_unlock(&q->lock);
161 */ 236
162 smp_read_barrier_depends(); 237 while (!list_empty(&list)) {
163 while (!list_empty(&q->list)) { 238 struct call_single_data *data;
164 unsigned int data_flags; 239
165 240 data = list_entry(list.next, struct call_single_data, list);
166 spin_lock(&q->lock); 241 list_del(&data->list);
167 list_replace_init(&q->list, &list); 242
168 spin_unlock(&q->lock); 243 /*
169 244 * 'data' can be invalid after this call if flags == 0
170 while (!list_empty(&list)) { 245 * (when called through generic_exec_single()),
171 struct call_single_data *data; 246 * so save them away before making the call:
172 247 */
173 data = list_entry(list.next, struct call_single_data, 248 data_flags = data->flags;
174 list); 249
175 list_del(&data->list); 250 data->func(data->info);
176 251
177 /*
178 * 'data' can be invalid after this call if
179 * flags == 0 (when called through
180 * generic_exec_single(), so save them away before
181 * making the call.
182 */
183 data_flags = data->flags;
184
185 data->func(data->info);
186
187 if (data_flags & CSD_FLAG_WAIT) {
188 smp_wmb();
189 data->flags &= ~CSD_FLAG_WAIT;
190 } else if (data_flags & CSD_FLAG_LOCK) {
191 smp_wmb();
192 data->flags &= ~CSD_FLAG_LOCK;
193 } else if (data_flags & CSD_FLAG_ALLOC)
194 kfree(data);
195 }
196 /* 252 /*
197 * See comment on outer loop 253 * Unlocked CSDs are valid through generic_exec_single():
198 */ 254 */
199 smp_read_barrier_depends(); 255 if (data_flags & CSD_FLAG_LOCK)
256 csd_unlock(data);
200 } 257 }
201} 258}
202 259
@@ -215,65 +272,45 @@ static DEFINE_PER_CPU(struct call_single_data, csd_data);
215int smp_call_function_single(int cpu, void (*func) (void *info), void *info, 272int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
216 int wait) 273 int wait)
217{ 274{
218 struct call_single_data d; 275 struct call_single_data d = {
276 .flags = 0,
277 };
219 unsigned long flags; 278 unsigned long flags;
220 /* prevent preemption and reschedule on another processor, 279 int this_cpu;
221 as well as CPU removal */
222 int me = get_cpu();
223 int err = 0; 280 int err = 0;
224 281
282 /*
283 * prevent preemption and reschedule on another processor,
284 * as well as CPU removal
285 */
286 this_cpu = get_cpu();
287
225 /* Can deadlock when called with interrupts disabled */ 288 /* Can deadlock when called with interrupts disabled */
226 WARN_ON(irqs_disabled()); 289 WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
227 290
228 if (cpu == me) { 291 if (cpu == this_cpu) {
229 local_irq_save(flags); 292 local_irq_save(flags);
230 func(info); 293 func(info);
231 local_irq_restore(flags); 294 local_irq_restore(flags);
232 } else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { 295 } else {
233 struct call_single_data *data; 296 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
297 struct call_single_data *data = &d;
298
299 if (!wait)
300 data = &__get_cpu_var(csd_data);
234 301
235 if (!wait) { 302 csd_lock(data);
236 /* 303
237 * We are calling a function on a single CPU 304 data->func = func;
238 * and we are not going to wait for it to finish. 305 data->info = info;
239 * We first try to allocate the data, but if we 306 generic_exec_single(cpu, data, wait);
240 * fail, we fall back to use a per cpu data to pass
241 * the information to that CPU. Since all callers
242 * of this code will use the same data, we must
243 * synchronize the callers to prevent a new caller
244 * from corrupting the data before the callee
245 * can access it.
246 *
247 * The CSD_FLAG_LOCK is used to let us know when
248 * the IPI handler is done with the data.
249 * The first caller will set it, and the callee
250 * will clear it. The next caller must wait for
251 * it to clear before we set it again. This
252 * will make sure the callee is done with the
253 * data before a new caller will use it.
254 */
255 data = kmalloc(sizeof(*data), GFP_ATOMIC);
256 if (data)
257 data->flags = CSD_FLAG_ALLOC;
258 else {
259 data = &per_cpu(csd_data, me);
260 while (data->flags & CSD_FLAG_LOCK)
261 cpu_relax();
262 data->flags = CSD_FLAG_LOCK;
263 }
264 } else { 307 } else {
265 data = &d; 308 err = -ENXIO; /* CPU not online */
266 data->flags = CSD_FLAG_WAIT;
267 } 309 }
268
269 data->func = func;
270 data->info = info;
271 generic_exec_single(cpu, data);
272 } else {
273 err = -ENXIO; /* CPU not online */
274 } 310 }
275 311
276 put_cpu(); 312 put_cpu();
313
277 return err; 314 return err;
278} 315}
279EXPORT_SYMBOL(smp_call_function_single); 316EXPORT_SYMBOL(smp_call_function_single);
@@ -283,23 +320,26 @@ EXPORT_SYMBOL(smp_call_function_single);
283 * @cpu: The CPU to run on. 320 * @cpu: The CPU to run on.
284 * @data: Pre-allocated and setup data structure 321 * @data: Pre-allocated and setup data structure
285 * 322 *
286 * Like smp_call_function_single(), but allow caller to pass in a pre-allocated 323 * Like smp_call_function_single(), but allow caller to pass in a
287 * data structure. Useful for embedding @data inside other structures, for 324 * pre-allocated data structure. Useful for embedding @data inside
288 * instance. 325 * other structures, for instance.
289 *
290 */ 326 */
291void __smp_call_function_single(int cpu, struct call_single_data *data) 327void __smp_call_function_single(int cpu, struct call_single_data *data,
328 int wait)
292{ 329{
330 csd_lock(data);
331
293 /* Can deadlock when called with interrupts disabled */ 332 /* Can deadlock when called with interrupts disabled */
294 WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled()); 333 WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
295 334
296 generic_exec_single(cpu, data); 335 generic_exec_single(cpu, data, wait);
297} 336}
298 337
299/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */ 338/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */
339
300#ifndef arch_send_call_function_ipi_mask 340#ifndef arch_send_call_function_ipi_mask
301#define arch_send_call_function_ipi_mask(maskp) \ 341# define arch_send_call_function_ipi_mask(maskp) \
302 arch_send_call_function_ipi(*(maskp)) 342 arch_send_call_function_ipi(*(maskp))
303#endif 343#endif
304 344
305/** 345/**
@@ -307,7 +347,8 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
307 * @mask: The set of cpus to run on (only runs on online subset). 347 * @mask: The set of cpus to run on (only runs on online subset).
308 * @func: The function to run. This must be fast and non-blocking. 348 * @func: The function to run. This must be fast and non-blocking.
309 * @info: An arbitrary pointer to pass to the function. 349 * @info: An arbitrary pointer to pass to the function.
310 * @wait: If true, wait (atomically) until function has completed on other CPUs. 350 * @wait: If true, wait (atomically) until function has completed
351 * on other CPUs.
311 * 352 *
312 * If @wait is true, then returns once @func has returned. Note that @wait 353 * If @wait is true, then returns once @func has returned. Note that @wait
313 * will be implicitly turned on in case of allocation failures, since 354 * will be implicitly turned on in case of allocation failures, since
@@ -318,27 +359,27 @@ void __smp_call_function_single(int cpu, struct call_single_data *data)
318 * must be disabled when calling this function. 359 * must be disabled when calling this function.
319 */ 360 */
320void smp_call_function_many(const struct cpumask *mask, 361void smp_call_function_many(const struct cpumask *mask,
321 void (*func)(void *), void *info, 362 void (*func)(void *), void *info, bool wait)
322 bool wait)
323{ 363{
324 struct call_function_data *data; 364 struct call_function_data *data;
325 unsigned long flags; 365 unsigned long flags;
326 int cpu, next_cpu; 366 int cpu, next_cpu, this_cpu = smp_processor_id();
327 367
328 /* Can deadlock when called with interrupts disabled */ 368 /* Can deadlock when called with interrupts disabled */
329 WARN_ON(irqs_disabled()); 369 WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
330 370
331 /* So, what's a CPU they want? Ignoring this one. */ 371 /* So, what's a CPU they want? Ignoring this one. */
332 cpu = cpumask_first_and(mask, cpu_online_mask); 372 cpu = cpumask_first_and(mask, cpu_online_mask);
333 if (cpu == smp_processor_id()) 373 if (cpu == this_cpu)
334 cpu = cpumask_next_and(cpu, mask, cpu_online_mask); 374 cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
375
335 /* No online cpus? We're done. */ 376 /* No online cpus? We're done. */
336 if (cpu >= nr_cpu_ids) 377 if (cpu >= nr_cpu_ids)
337 return; 378 return;
338 379
339 /* Do we have another CPU which isn't us? */ 380 /* Do we have another CPU which isn't us? */
340 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask); 381 next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
341 if (next_cpu == smp_processor_id()) 382 if (next_cpu == this_cpu)
342 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask); 383 next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
343 384
344 /* Fastpath: do that cpu by itself. */ 385 /* Fastpath: do that cpu by itself. */
@@ -347,43 +388,40 @@ void smp_call_function_many(const struct cpumask *mask,
347 return; 388 return;
348 } 389 }
349 390
350 data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC); 391 data = &__get_cpu_var(cfd_data);
351 if (unlikely(!data)) { 392 csd_lock(&data->csd);
352 /* Slow path. */
353 for_each_online_cpu(cpu) {
354 if (cpu == smp_processor_id())
355 continue;
356 if (cpumask_test_cpu(cpu, mask))
357 smp_call_function_single(cpu, func, info, wait);
358 }
359 return;
360 }
361 393
362 spin_lock_init(&data->lock); 394 spin_lock_irqsave(&data->lock, flags);
363 data->csd.flags = CSD_FLAG_ALLOC;
364 if (wait)
365 data->csd.flags |= CSD_FLAG_WAIT;
366 data->csd.func = func; 395 data->csd.func = func;
367 data->csd.info = info; 396 data->csd.info = info;
368 cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask); 397 cpumask_and(data->cpumask, mask, cpu_online_mask);
369 cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits)); 398 cpumask_clear_cpu(this_cpu, data->cpumask);
370 data->refs = cpumask_weight(to_cpumask(data->cpumask_bits)); 399 data->refs = cpumask_weight(data->cpumask);
371 400
372 spin_lock_irqsave(&call_function_lock, flags); 401 spin_lock(&call_function.lock);
373 list_add_tail_rcu(&data->csd.list, &call_function_queue); 402 /*
374 spin_unlock_irqrestore(&call_function_lock, flags); 403 * Place entry at the _HEAD_ of the list, so that any cpu still
404 * observing the entry in generic_smp_call_function_interrupt()
405 * will not miss any other list entries:
406 */
407 list_add_rcu(&data->csd.list, &call_function.queue);
408 spin_unlock(&call_function.lock);
409
410 spin_unlock_irqrestore(&data->lock, flags);
375 411
376 /* 412 /*
377 * Make the list addition visible before sending the ipi. 413 * Make the list addition visible before sending the ipi.
414 * (IPIs must obey or appear to obey normal Linux cache
415 * coherency rules -- see comment in generic_exec_single).
378 */ 416 */
379 smp_mb(); 417 smp_mb();
380 418
381 /* Send a message to all CPUs in the map */ 419 /* Send a message to all CPUs in the map */
382 arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits)); 420 arch_send_call_function_ipi_mask(data->cpumask);
383 421
384 /* optionally wait for the CPUs to complete */ 422 /* Optionally wait for the CPUs to complete */
385 if (wait) 423 if (wait)
386 csd_flag_wait(&data->csd); 424 csd_lock_wait(&data->csd);
387} 425}
388EXPORT_SYMBOL(smp_call_function_many); 426EXPORT_SYMBOL(smp_call_function_many);
389 427
@@ -391,7 +429,8 @@ EXPORT_SYMBOL(smp_call_function_many);
391 * smp_call_function(): Run a function on all other CPUs. 429 * smp_call_function(): Run a function on all other CPUs.
392 * @func: The function to run. This must be fast and non-blocking. 430 * @func: The function to run. This must be fast and non-blocking.
393 * @info: An arbitrary pointer to pass to the function. 431 * @info: An arbitrary pointer to pass to the function.
394 * @wait: If true, wait (atomically) until function has completed on other CPUs. 432 * @wait: If true, wait (atomically) until function has completed
433 * on other CPUs.
395 * 434 *
396 * Returns 0. 435 * Returns 0.
397 * 436 *
@@ -407,26 +446,27 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
407 preempt_disable(); 446 preempt_disable();
408 smp_call_function_many(cpu_online_mask, func, info, wait); 447 smp_call_function_many(cpu_online_mask, func, info, wait);
409 preempt_enable(); 448 preempt_enable();
449
410 return 0; 450 return 0;
411} 451}
412EXPORT_SYMBOL(smp_call_function); 452EXPORT_SYMBOL(smp_call_function);
413 453
414void ipi_call_lock(void) 454void ipi_call_lock(void)
415{ 455{
416 spin_lock(&call_function_lock); 456 spin_lock(&call_function.lock);
417} 457}
418 458
419void ipi_call_unlock(void) 459void ipi_call_unlock(void)
420{ 460{
421 spin_unlock(&call_function_lock); 461 spin_unlock(&call_function.lock);
422} 462}
423 463
424void ipi_call_lock_irq(void) 464void ipi_call_lock_irq(void)
425{ 465{
426 spin_lock_irq(&call_function_lock); 466 spin_lock_irq(&call_function.lock);
427} 467}
428 468
429void ipi_call_unlock_irq(void) 469void ipi_call_unlock_irq(void)
430{ 470{
431 spin_unlock_irq(&call_function_lock); 471 spin_unlock_irq(&call_function.lock);
432} 472}
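
From a caller's point of view, the reworked call-single API looks as sketched below: a synchronous smp_call_function_single(), plus a caller-owned call_single_data handed to __smp_call_function_single() with its new wait argument, where CSD_FLAG_LOCK (taken by csd_lock() inside the call) replaces the old kmalloc/CSD_FLAG_ALLOC lifetime management. Illustrative only; remote_tick and my_csd are made-up names.

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct call_single_data, my_csd);

static void remote_tick(void *info)
{
        /* runs in IPI context on the target cpu: fast and non-blocking */
        (*(unsigned long *)info)++;
}

static void kick_cpu(int cpu, unsigned long *counter)
{
        struct call_single_data *csd = &per_cpu(my_csd, cpu);

        /* wait=1: returns only after remote_tick() ran on @cpu */
        smp_call_function_single(cpu, remote_tick, counter, 1);

        /* wait=0 with a caller-owned csd: csd_lock() inside
         * __smp_call_function_single() serialises reuse of @csd */
        csd->func = remote_tick;
        csd->info = counter;
        __smp_call_function_single(cpu, csd, 0);
}
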
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9041ea7948fe..ea23ec087ee9 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -180,7 +180,7 @@ asmlinkage void __do_softirq(void)
180 account_system_vtime(current); 180 account_system_vtime(current);
181 181
182 __local_bh_disable((unsigned long)__builtin_return_address(0)); 182 __local_bh_disable((unsigned long)__builtin_return_address(0));
183 trace_softirq_enter(); 183 lockdep_softirq_enter();
184 184
185 cpu = smp_processor_id(); 185 cpu = smp_processor_id();
186restart: 186restart:
@@ -220,7 +220,7 @@ restart:
220 if (pending) 220 if (pending)
221 wakeup_softirqd(); 221 wakeup_softirqd();
222 222
223 trace_softirq_exit(); 223 lockdep_softirq_exit();
224 224
225 account_system_vtime(current); 225 account_system_vtime(current);
226 _local_bh_enable(); 226 _local_bh_enable();
@@ -496,7 +496,7 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
496 cp->flags = 0; 496 cp->flags = 0;
497 cp->priv = softirq; 497 cp->priv = softirq;
498 498
499 __smp_call_function_single(cpu, cp); 499 __smp_call_function_single(cpu, cp, 0);
500 return 0; 500 return 0;
501 } 501 }
502 return 1; 502 return 1;
@@ -796,6 +796,11 @@ int __init __weak early_irq_init(void)
796 return 0; 796 return 0;
797} 797}
798 798
799int __init __weak arch_probe_nr_irqs(void)
800{
801 return 0;
802}
803
799int __init __weak arch_early_irq_init(void) 804int __init __weak arch_early_irq_init(void)
800{ 805{
801 return 0; 806 return 0;
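The new arch_probe_nr_irqs() follows the standard weak-default pattern: generic code ships a __weak stub, and an architecture that wants to size the IRQ space itself links a strong definition over it. A hedged sketch (the override and its numbers are hypothetical):

/* kernel/softirq.c: generic fallback when the arch provides nothing */
int __init __weak arch_probe_nr_irqs(void)
{
	return 0;
}

/* arch/<arch>/kernel/irq.c (hypothetical): the strong definition wins at link time */
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = NR_IRQS_LEGACY + 256;	/* assumption: 256 platform interrupts */
	return 0;
}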
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 29ab20749dd3..7932653c4ebd 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -121,7 +121,8 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
121 local_irq_save(flags); 121 local_irq_save(flags);
122 preempt_disable(); 122 preempt_disable();
123 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); 123 rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_);
124 LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); 124 LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock,
125 _raw_read_lock_flags, &flags);
125 return flags; 126 return flags;
126} 127}
127EXPORT_SYMBOL(_read_lock_irqsave); 128EXPORT_SYMBOL(_read_lock_irqsave);
@@ -151,7 +152,8 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
151 local_irq_save(flags); 152 local_irq_save(flags);
152 preempt_disable(); 153 preempt_disable();
153 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); 154 rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_);
154 LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); 155 LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock,
156 _raw_write_lock_flags, &flags);
155 return flags; 157 return flags;
156} 158}
157EXPORT_SYMBOL(_write_lock_irqsave); 159EXPORT_SYMBOL(_write_lock_irqsave);
@@ -299,16 +301,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
299 local_irq_save(flags); 301 local_irq_save(flags);
300 preempt_disable(); 302 preempt_disable();
301 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); 303 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
302 /* 304 LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
303 * On lockdep we dont want the hand-coded irq-enable of 305 _raw_spin_lock_flags, &flags);
304 * _raw_spin_lock_flags() code, because lockdep assumes
305 * that interrupts are not re-enabled during lock-acquire:
306 */
307#ifdef CONFIG_LOCKDEP
308 LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
309#else
310 _raw_spin_lock_flags(lock, &flags);
311#endif
312 return flags; 306 return flags;
313} 307}
314EXPORT_SYMBOL(_spin_lock_irqsave_nested); 308EXPORT_SYMBOL(_spin_lock_irqsave_nested);
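LOCK_CONTENDED_FLAGS lets the flags-aware architecture slowpath coexist with lockstat instead of being compiled out under lockdep. The shape it expands to is roughly this (a sketch, not the exact macro text):

if (!_raw_spin_trylock(lock)) {			/* uncontended fast path */
	lock_contended(&lock->dep_map, _RET_IP_);	/* lockstat: we had to wait */
	_raw_spin_lock_flags(lock, &flags);	/* arch slowpath; may re-enable IRQs while spinning */
}
lock_acquired(&lock->dep_map, _RET_IP_);	/* lockstat: acquired */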
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 0cd415ee62a2..912823e2a11b 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -44,7 +44,7 @@ static DEFINE_MUTEX(setup_lock);
44static int refcount; 44static int refcount;
45static struct workqueue_struct *stop_machine_wq; 45static struct workqueue_struct *stop_machine_wq;
46static struct stop_machine_data active, idle; 46static struct stop_machine_data active, idle;
47static const cpumask_t *active_cpus; 47static const struct cpumask *active_cpus;
48static void *stop_machine_work; 48static void *stop_machine_work;
49 49
50static void set_state(enum stopmachine_state newstate) 50static void set_state(enum stopmachine_state newstate)
@@ -170,7 +170,7 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus)
170 * doesn't hit this CPU until we're ready. */ 170 * doesn't hit this CPU until we're ready. */
171 get_cpu(); 171 get_cpu();
172 for_each_online_cpu(i) { 172 for_each_online_cpu(i) {
173 sm_work = percpu_ptr(stop_machine_work, i); 173 sm_work = per_cpu_ptr(stop_machine_work, i);
174 INIT_WORK(sm_work, stop_cpu); 174 INIT_WORK(sm_work, stop_cpu);
175 queue_work_on(i, stop_machine_wq, sm_work); 175 queue_work_on(i, stop_machine_wq, sm_work);
176 } 176 }
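percpu_ptr() belonged to the old percpu API; per_cpu_ptr() is the accessor that pairs with alloc_percpu(). A sketch of the pairing as this file uses it:

struct work_struct *works = alloc_percpu(struct work_struct);
int cpu;

if (!works)
	return -ENOMEM;

for_each_online_cpu(cpu) {
	struct work_struct *sm_work = per_cpu_ptr(works, cpu);

	INIT_WORK(sm_work, stop_cpu);		/* one work item per CPU */
	queue_work_on(cpu, stop_machine_wq, sm_work);
}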
diff --git a/kernel/sys.c b/kernel/sys.c
index 37f458e6882a..51dbb55604e8 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -34,6 +34,7 @@
34#include <linux/seccomp.h> 34#include <linux/seccomp.h>
35#include <linux/cpu.h> 35#include <linux/cpu.h>
36#include <linux/ptrace.h> 36#include <linux/ptrace.h>
37#include <linux/fs_struct.h>
37 38
38#include <linux/compat.h> 39#include <linux/compat.h>
39#include <linux/syscalls.h> 40#include <linux/syscalls.h>
@@ -1013,10 +1014,8 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1013 if (err) 1014 if (err)
1014 goto out; 1015 goto out;
1015 1016
1016 if (task_pgrp(p) != pgrp) { 1017 if (task_pgrp(p) != pgrp)
1017 change_pid(p, PIDTYPE_PGID, pgrp); 1018 change_pid(p, PIDTYPE_PGID, pgrp);
1018 set_task_pgrp(p, pid_nr(pgrp));
1019 }
1020 1019
1021 err = 0; 1020 err = 0;
1022out: 1021out:
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c5ef44ff850f..82350f8f04f6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -48,6 +48,7 @@
48#include <linux/acpi.h> 48#include <linux/acpi.h>
49#include <linux/reboot.h> 49#include <linux/reboot.h>
50#include <linux/ftrace.h> 50#include <linux/ftrace.h>
51#include <linux/slow-work.h>
51 52
52#include <asm/uaccess.h> 53#include <asm/uaccess.h>
53#include <asm/processor.h> 54#include <asm/processor.h>
@@ -95,12 +96,9 @@ static int sixty = 60;
95static int neg_one = -1; 96static int neg_one = -1;
96#endif 97#endif
97 98
98#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
99static int two = 2;
100#endif
101
102static int zero; 99static int zero;
103static int one = 1; 100static int one = 1;
101static int two = 2;
104static unsigned long one_ul = 1; 102static unsigned long one_ul = 1;
105static int one_hundred = 100; 103static int one_hundred = 100;
106 104
@@ -900,6 +898,14 @@ static struct ctl_table kern_table[] = {
900 .proc_handler = &scan_unevictable_handler, 898 .proc_handler = &scan_unevictable_handler,
901 }, 899 },
902#endif 900#endif
901#ifdef CONFIG_SLOW_WORK
902 {
903 .ctl_name = CTL_UNNUMBERED,
904 .procname = "slow-work",
905 .mode = 0555,
906 .child = slow_work_sysctls,
907 },
908#endif
903/* 909/*
904 * NOTE: do not add new entries to this table unless you have read 910 * NOTE: do not add new entries to this table unless you have read
905 * Documentation/sysctl/ctl_unnumbered.txt 911 * Documentation/sysctl/ctl_unnumbered.txt
@@ -1010,7 +1016,7 @@ static struct ctl_table vm_table[] = {
1010 .data = &dirty_expire_interval, 1016 .data = &dirty_expire_interval,
1011 .maxlen = sizeof(dirty_expire_interval), 1017 .maxlen = sizeof(dirty_expire_interval),
1012 .mode = 0644, 1018 .mode = 0644,
1013 .proc_handler = &proc_dointvec_userhz_jiffies, 1019 .proc_handler = &proc_dointvec,
1014 }, 1020 },
1015 { 1021 {
1016 .ctl_name = VM_NR_PDFLUSH_THREADS, 1022 .ctl_name = VM_NR_PDFLUSH_THREADS,
@@ -1373,10 +1379,7 @@ static struct ctl_table fs_table[] = {
1373 .data = &lease_break_time, 1379 .data = &lease_break_time,
1374 .maxlen = sizeof(int), 1380 .maxlen = sizeof(int),
1375 .mode = 0644, 1381 .mode = 0644,
1376 .proc_handler = &proc_dointvec_minmax, 1382 .proc_handler = &proc_dointvec,
1377 .strategy = &sysctl_intvec,
1378 .extra1 = &zero,
1379 .extra2 = &two,
1380 }, 1383 },
1381#endif 1384#endif
1382#ifdef CONFIG_AIO 1385#ifdef CONFIG_AIO
@@ -1417,7 +1420,10 @@ static struct ctl_table fs_table[] = {
1417 .data = &suid_dumpable, 1420 .data = &suid_dumpable,
1418 .maxlen = sizeof(int), 1421 .maxlen = sizeof(int),
1419 .mode = 0644, 1422 .mode = 0644,
1420 .proc_handler = &proc_dointvec, 1423 .proc_handler = &proc_dointvec_minmax,
1424 .strategy = &sysctl_intvec,
1425 .extra1 = &zero,
1426 .extra2 = &two,
1421 }, 1427 },
1422#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) 1428#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
1423 { 1429 {
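The suid_dumpable hunk shows the idiom for a bounded integer sysctl: proc_dointvec_minmax as the proc handler, sysctl_intvec as the strategy, and extra1/extra2 as the inclusive bounds. A sketch of a hypothetical knob clamped to [0, 2], reusing the shared zero/two bounds introduced above:

static int example_knob;			/* hypothetical */

static struct ctl_table example_table[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
		.extra2		= &two,
	},
	{ .ctl_name = 0 }
};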
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index fafeb48f27c0..b38423ca711a 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -219,6 +219,7 @@ static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" }, 219 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" }, 220 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" }, 221 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
222 { NET_IPV4_CONF_ARP_NOTIFY, "arp_notify" },
222 {} 223 {}
223}; 224};
224 225
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 905b0b50792d..0b0a6366c9d4 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,4 +1,4 @@
1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o 1obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
2 2
3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o 3obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o 4obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ea2f48af83cf..d13be216a790 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -68,6 +68,17 @@ void clockevents_set_mode(struct clock_event_device *dev,
68 if (dev->mode != mode) { 68 if (dev->mode != mode) {
69 dev->set_mode(mode, dev); 69 dev->set_mode(mode, dev);
70 dev->mode = mode; 70 dev->mode = mode;
71
72 /*
73 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
74 * on it, so fix it up and emit a warning:
75 */
76 if (mode == CLOCK_EVT_MODE_ONESHOT) {
77 if (unlikely(!dev->mult)) {
78 dev->mult = 1;
79 WARN_ON(1);
80 }
81 }
71 } 82 }
72} 83}
73 84
@@ -168,15 +179,6 @@ void clockevents_register_device(struct clock_event_device *dev)
168 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 179 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
169 BUG_ON(!dev->cpumask); 180 BUG_ON(!dev->cpumask);
170 181
171 /*
172 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
173 * on it, so fix it up and emit a warning:
174 */
175 if (unlikely(!dev->mult)) {
176 dev->mult = 1;
177 WARN_ON(1);
178 }
179
180 spin_lock(&clockevents_lock); 182 spin_lock(&clockevents_lock);
181 183
182 list_add(&dev->list, &clockevent_devices); 184 list_add(&dev->list, &clockevent_devices);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ca89e1593f08..c46c931a7fe7 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -31,6 +31,82 @@
31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */ 31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
32#include <linux/tick.h> 32#include <linux/tick.h>
33 33
34void timecounter_init(struct timecounter *tc,
35 const struct cyclecounter *cc,
36 u64 start_tstamp)
37{
38 tc->cc = cc;
39 tc->cycle_last = cc->read(cc);
40 tc->nsec = start_tstamp;
41}
42EXPORT_SYMBOL(timecounter_init);
43
44/**
45 * timecounter_read_delta - get nanoseconds since last call of this function
46 * @tc: Pointer to time counter
47 *
48 * When the underlying cycle counter runs over, this will be handled
49 * correctly as long as it does not run over more than once between
50 * calls.
51 *
52 * The first call to this function for a new time counter initializes
53 * the time tracking and returns an undefined result.
54 */
55static u64 timecounter_read_delta(struct timecounter *tc)
56{
57 cycle_t cycle_now, cycle_delta;
58 u64 ns_offset;
59
60 /* read cycle counter: */
61 cycle_now = tc->cc->read(tc->cc);
62
63 /* calculate the delta since the last timecounter_read_delta(): */
64 cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask;
65
66 /* convert to nanoseconds: */
67 ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta);
68
69 /* update time stamp of timecounter_read_delta() call: */
70 tc->cycle_last = cycle_now;
71
72 return ns_offset;
73}
74
75u64 timecounter_read(struct timecounter *tc)
76{
77 u64 nsec;
78
79 /* increment time by nanoseconds since last call */
80 nsec = timecounter_read_delta(tc);
81 nsec += tc->nsec;
82 tc->nsec = nsec;
83
84 return nsec;
85}
86EXPORT_SYMBOL(timecounter_read);
87
88u64 timecounter_cyc2time(struct timecounter *tc,
89 cycle_t cycle_tstamp)
90{
91 u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
92 u64 nsec;
93
94 /*
95 * Instead of always treating cycle_tstamp as more recent
96 * than tc->cycle_last, detect when it is too far in the
97 * future and treat it as old time stamp instead.
98 */
99 if (cycle_delta > tc->cc->mask / 2) {
100 cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
101 nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta);
102 } else {
103 nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec;
104 }
105
106 return nsec;
107}
108EXPORT_SYMBOL(timecounter_cyc2time);
109
34/* XXX - Would like a better way for initializing curr_clocksource */ 110/* XXX - Would like a better way for initializing curr_clocksource */
35extern struct clocksource clocksource_jiffies; 111extern struct clocksource clocksource_jiffies;
36 112
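These helpers turn any free-running hardware counter into an accumulating nanosecond clock. A sketch of a driver wiring one up; the device register is hypothetical, and mult/shift must be picked so that cyclecounter_cyc2ns(), i.e. (cycles * mult) >> shift, converts ticks to nanoseconds:

static cycle_t nic_read_counter(const struct cyclecounter *cc)
{
	return (cycle_t)readq(nic_regs + NIC_CYCLE_REG);	/* hypothetical 64-bit counter */
}

static struct cyclecounter nic_cc = {
	.read	= nic_read_counter,
	.mask	= CLOCKSOURCE_MASK(64),
	.mult	= 8,	/* assumption: 125 MHz counter, 8 ns per tick */
	.shift	= 0,
};

static struct timecounter nic_tc;

/* at init: anchor the counter to a starting nanosecond value */
timecounter_init(&nic_tc, &nic_cc, ktime_to_ns(ktime_get_real()));

/* periodically (at least once per counter wrap): accumulate elapsed time */
u64 now_ns = timecounter_read(&nic_tc);

/* translate a raw timestamp the hardware latched in between */
u64 hw_tstamp = nic_read_counter(&nic_cc);	/* e.g. latched on packet receive */
u64 stamp_ns  = timecounter_cyc2time(&nic_tc, hw_tstamp);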
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f5f793d92415..7fc64375ff43 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -1,71 +1,129 @@
1/* 1/*
2 * linux/kernel/time/ntp.c
3 *
4 * NTP state machine interfaces and logic. 2 * NTP state machine interfaces and logic.
5 * 3 *
6 * This code was mainly moved from kernel/timer.c and kernel/time.c 4 * This code was mainly moved from kernel/timer.c and kernel/time.c
7 * Please see those files for relevant copyright info and historical 5 * Please see those files for relevant copyright info and historical
8 * changelogs. 6 * changelogs.
9 */ 7 */
10
11#include <linux/mm.h>
12#include <linux/time.h>
13#include <linux/timex.h>
14#include <linux/jiffies.h>
15#include <linux/hrtimer.h>
16#include <linux/capability.h> 8#include <linux/capability.h>
17#include <linux/math64.h>
18#include <linux/clocksource.h> 9#include <linux/clocksource.h>
19#include <linux/workqueue.h> 10#include <linux/workqueue.h>
20#include <asm/timex.h> 11#include <linux/hrtimer.h>
12#include <linux/jiffies.h>
13#include <linux/math64.h>
14#include <linux/timex.h>
15#include <linux/time.h>
16#include <linux/mm.h>
21 17
22/* 18/*
23 * Timekeeping variables 19 * NTP timekeeping variables:
24 */ 20 */
25unsigned long tick_usec = TICK_USEC; /* USER_HZ period (usec) */
26unsigned long tick_nsec; /* ACTHZ period (nsec) */
27u64 tick_length;
28static u64 tick_length_base;
29 21
30static struct hrtimer leap_timer; 22/* USER_HZ period (usecs): */
23unsigned long tick_usec = TICK_USEC;
31 24
32#define MAX_TICKADJ 500 /* microsecs */ 25/* ACTHZ period (nsecs): */
33#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \ 26unsigned long tick_nsec;
34 NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ) 27
28u64 tick_length;
29static u64 tick_length_base;
30
31static struct hrtimer leap_timer;
32
33#define MAX_TICKADJ 500LL /* usecs */
34#define MAX_TICKADJ_SCALED \
35 (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
35 36
36/* 37/*
37 * phase-lock loop variables 38 * phase-lock loop variables
38 */ 39 */
39/* TIME_ERROR prevents overwriting the CMOS clock */
40static int time_state = TIME_OK; /* clock synchronization status */
41int time_status = STA_UNSYNC; /* clock status bits */
42static long time_tai; /* TAI offset (s) */
43static s64 time_offset; /* time adjustment (ns) */
44static long time_constant = 2; /* pll time constant */
45long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
46long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
47static s64 time_freq; /* frequency offset (scaled ns/s)*/
48static long time_reftime; /* time at last adjustment (s) */
49long time_adjust;
50static long ntp_tick_adj;
51 40
41/*
42 * clock synchronization status
43 *
44 * (TIME_ERROR prevents overwriting the CMOS clock)
45 */
46static int time_state = TIME_OK;
47
48/* clock status bits: */
49int time_status = STA_UNSYNC;
50
51/* TAI offset (secs): */
52static long time_tai;
53
54/* time adjustment (nsecs): */
55static s64 time_offset;
56
57/* pll time constant: */
58static long time_constant = 2;
59
60/* maximum error (usecs): */
61long time_maxerror = NTP_PHASE_LIMIT;
62
63/* estimated error (usecs): */
64long time_esterror = NTP_PHASE_LIMIT;
65
66/* frequency offset (scaled nsecs/secs): */
67static s64 time_freq;
68
69/* time at last adjustment (secs): */
70static long time_reftime;
71
72long time_adjust;
73
74/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
75static s64 ntp_tick_adj;
76
77/*
78 * NTP methods:
79 */
80
81/*
82 * Update (tick_length, tick_length_base, tick_nsec), based
83 * on (tick_usec, ntp_tick_adj, time_freq):
84 */
52static void ntp_update_frequency(void) 85static void ntp_update_frequency(void)
53{ 86{
54 u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ) 87 u64 second_length;
55 << NTP_SCALE_SHIFT; 88 u64 new_base;
56 second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT; 89
57 second_length += time_freq; 90 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
91 << NTP_SCALE_SHIFT;
92
93 second_length += ntp_tick_adj;
94 second_length += time_freq;
58 95
59 tick_length_base = second_length; 96 tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
97 new_base = div_u64(second_length, NTP_INTERVAL_FREQ);
60 98
61 tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT; 99 /*
62 tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ); 100 * Don't wait for the next second_overflow, apply
101 * the change to the tick length immediately:
102 */
103 tick_length += new_base - tick_length_base;
104 tick_length_base = new_base;
105}
106
107static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
108{
109 time_status &= ~STA_MODE;
110
111 if (secs < MINSEC)
112 return 0;
113
114 if (!(time_status & STA_FLL) && (secs <= MAXSEC))
115 return 0;
116
117 time_status |= STA_MODE;
118
119 return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
63} 120}
64 121
65static void ntp_update_offset(long offset) 122static void ntp_update_offset(long offset)
66{ 123{
67 long mtemp;
68 s64 freq_adj; 124 s64 freq_adj;
125 s64 offset64;
126 long secs;
69 127
70 if (!(time_status & STA_PLL)) 128 if (!(time_status & STA_PLL))
71 return; 129 return;
@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
84 * Select how the frequency is to be controlled 142 * Select how the frequency is to be controlled
85 * and in which mode (PLL or FLL). 143 * and in which mode (PLL or FLL).
86 */ 144 */
87 if (time_status & STA_FREQHOLD || time_reftime == 0) 145 secs = xtime.tv_sec - time_reftime;
88 time_reftime = xtime.tv_sec; 146 if (unlikely(time_status & STA_FREQHOLD))
89 mtemp = xtime.tv_sec - time_reftime; 147 secs = 0;
148
90 time_reftime = xtime.tv_sec; 149 time_reftime = xtime.tv_sec;
91 150
92 freq_adj = (s64)offset * mtemp; 151 offset64 = offset;
93 freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant); 152 freq_adj = (offset64 * secs) <<
94 time_status &= ~STA_MODE; 153 (NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
95 if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
96 freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
97 mtemp);
98 time_status |= STA_MODE;
99 }
100 freq_adj += time_freq;
101 freq_adj = min(freq_adj, MAXFREQ_SCALED);
102 time_freq = max(freq_adj, -MAXFREQ_SCALED);
103 154
104 time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ); 155 freq_adj += ntp_update_offset_fll(offset64, secs);
156
157 freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED);
158
159 time_freq = max(freq_adj, -MAXFREQ_SCALED);
160
161 time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
105} 162}
106 163
107/** 164/**
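In closed form the rewritten update computes, with θ the phase offset, τ = secs since the last update, and TC = time_constant (this just restates the shifts in the code above):

\Delta f_{\mathrm{PLL}} = \theta \,\tau\, 2^{\,\mathrm{NTP\_SCALE\_SHIFT} - 2(\mathrm{SHIFT\_PLL} + 2 + \mathrm{TC})}

\Delta f_{\mathrm{FLL}} = \frac{\theta \, 2^{\,\mathrm{NTP\_SCALE\_SHIFT} - \mathrm{SHIFT\_FLL}}}{\tau}
\quad\text{(only if } \tau \ge \mathrm{MINSEC} \text{ and (STA\_FLL is set or } \tau > \mathrm{MAXSEC}\text{))}

f_{\mathrm{new}} = \mathrm{clamp}\big(f_{\mathrm{old}} + \Delta f_{\mathrm{PLL}} + \Delta f_{\mathrm{FLL}},\ \pm\mathrm{MAXFREQ\_SCALED}\big)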
@@ -111,15 +168,15 @@ static void ntp_update_offset(long offset)
111 */ 168 */
112void ntp_clear(void) 169void ntp_clear(void)
113{ 170{
114 time_adjust = 0; /* stop active adjtime() */ 171 time_adjust = 0; /* stop active adjtime() */
115 time_status |= STA_UNSYNC; 172 time_status |= STA_UNSYNC;
116 time_maxerror = NTP_PHASE_LIMIT; 173 time_maxerror = NTP_PHASE_LIMIT;
117 time_esterror = NTP_PHASE_LIMIT; 174 time_esterror = NTP_PHASE_LIMIT;
118 175
119 ntp_update_frequency(); 176 ntp_update_frequency();
120 177
121 tick_length = tick_length_base; 178 tick_length = tick_length_base;
122 time_offset = 0; 179 time_offset = 0;
123} 180}
124 181
125/* 182/*
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
140 xtime.tv_sec--; 197 xtime.tv_sec--;
141 wall_to_monotonic.tv_sec++; 198 wall_to_monotonic.tv_sec++;
142 time_state = TIME_OOP; 199 time_state = TIME_OOP;
143 printk(KERN_NOTICE "Clock: " 200 printk(KERN_NOTICE
144 "inserting leap second 23:59:60 UTC\n"); 201 "Clock: inserting leap second 23:59:60 UTC\n");
145 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC); 202 hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
146 res = HRTIMER_RESTART; 203 res = HRTIMER_RESTART;
147 break; 204 break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
150 time_tai--; 207 time_tai--;
151 wall_to_monotonic.tv_sec--; 208 wall_to_monotonic.tv_sec--;
152 time_state = TIME_WAIT; 209 time_state = TIME_WAIT;
153 printk(KERN_NOTICE "Clock: " 210 printk(KERN_NOTICE
154 "deleting leap second 23:59:59 UTC\n"); 211 "Clock: deleting leap second 23:59:59 UTC\n");
155 break; 212 break;
156 case TIME_OOP: 213 case TIME_OOP:
157 time_tai++; 214 time_tai++;
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
179 */ 236 */
180void second_overflow(void) 237void second_overflow(void)
181{ 238{
182 s64 time_adj; 239 s64 delta;
183 240
184 /* Bump the maxerror field */ 241 /* Bump the maxerror field */
185 time_maxerror += MAXFREQ / NSEC_PER_USEC; 242 time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -192,24 +249,30 @@ void second_overflow(void)
192 * Compute the phase adjustment for the next second. The offset is 249 * Compute the phase adjustment for the next second. The offset is
193 * reduced by a fixed factor times the time constant. 250 * reduced by a fixed factor times the time constant.
194 */ 251 */
195 tick_length = tick_length_base; 252 tick_length = tick_length_base;
196 time_adj = shift_right(time_offset, SHIFT_PLL + time_constant); 253
197 time_offset -= time_adj; 254 delta = shift_right(time_offset, SHIFT_PLL + time_constant);
198 tick_length += time_adj; 255 time_offset -= delta;
199 256 tick_length += delta;
200 if (unlikely(time_adjust)) { 257
201 if (time_adjust > MAX_TICKADJ) { 258 if (!time_adjust)
202 time_adjust -= MAX_TICKADJ; 259 return;
203 tick_length += MAX_TICKADJ_SCALED; 260
204 } else if (time_adjust < -MAX_TICKADJ) { 261 if (time_adjust > MAX_TICKADJ) {
205 time_adjust += MAX_TICKADJ; 262 time_adjust -= MAX_TICKADJ;
206 tick_length -= MAX_TICKADJ_SCALED; 263 tick_length += MAX_TICKADJ_SCALED;
207 } else { 264 return;
208 tick_length += (s64)(time_adjust * NSEC_PER_USEC /
209 NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
210 time_adjust = 0;
211 }
212 } 265 }
266
267 if (time_adjust < -MAX_TICKADJ) {
268 time_adjust += MAX_TICKADJ;
269 tick_length -= MAX_TICKADJ_SCALED;
270 return;
271 }
272
273 tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
274 << NTP_SCALE_SHIFT;
275 time_adjust = 0;
213} 276}
214 277
215#ifdef CONFIG_GENERIC_CMOS_UPDATE 278#ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
233 * This code is run on a timer. If the clock is set, that timer 296 * This code is run on a timer. If the clock is set, that timer
234 * may not expire at the correct time. Thus, we adjust... 297 * may not expire at the correct time. Thus, we adjust...
235 */ 298 */
236 if (!ntp_synced()) 299 if (!ntp_synced()) {
237 /* 300 /*
238 * Not synced, exit, do not restart a timer (if one is 301 * Not synced, exit, do not restart a timer (if one is
239 * running, let it run out). 302 * running, let it run out).
240 */ 303 */
241 return; 304 return;
305 }
242 306
243 getnstimeofday(&now); 307 getnstimeofday(&now);
244 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) 308 if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,7 +334,116 @@ static void notify_cmos_timer(void)
270static inline void notify_cmos_timer(void) { } 334static inline void notify_cmos_timer(void) { }
271#endif 335#endif
272 336
273/* adjtimex mainly allows reading (and writing, if superuser) of 337/*
338 * Start the leap seconds timer:
339 */
340static inline void ntp_start_leap_timer(struct timespec *ts)
341{
342 long now = ts->tv_sec;
343
344 if (time_status & STA_INS) {
345 time_state = TIME_INS;
346 now += 86400 - now % 86400;
347 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
348
349 return;
350 }
351
352 if (time_status & STA_DEL) {
353 time_state = TIME_DEL;
354 now += 86400 - (now + 1) % 86400;
355 hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
356 }
357}
358
359/*
360 * Propagate a new txc->status value into the NTP state:
361 */
362static inline void process_adj_status(struct timex *txc, struct timespec *ts)
363{
364 if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
365 time_state = TIME_OK;
366 time_status = STA_UNSYNC;
367 }
368
369 /*
370 * If we turn on PLL adjustments then reset the
371 * reference time to current time.
372 */
373 if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
374 time_reftime = xtime.tv_sec;
375
376 /* only set allowed bits */
377 time_status &= STA_RONLY;
378 time_status |= txc->status & ~STA_RONLY;
379
380 switch (time_state) {
381 case TIME_OK:
382 ntp_start_leap_timer(ts);
383 break;
384 case TIME_INS:
385 case TIME_DEL:
386 time_state = TIME_OK;
387 ntp_start_leap_timer(ts);
388 case TIME_WAIT:
389 if (!(time_status & (STA_INS | STA_DEL)))
390 time_state = TIME_OK;
391 break;
392 case TIME_OOP:
393 hrtimer_restart(&leap_timer);
394 break;
395 }
396}
397/*
398 * Called with the xtime lock held, so we can access and modify
399 * all the global NTP state:
400 */
401static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
402{
403 if (txc->modes & ADJ_STATUS)
404 process_adj_status(txc, ts);
405
406 if (txc->modes & ADJ_NANO)
407 time_status |= STA_NANO;
408
409 if (txc->modes & ADJ_MICRO)
410 time_status &= ~STA_NANO;
411
412 if (txc->modes & ADJ_FREQUENCY) {
413 time_freq = txc->freq * PPM_SCALE;
414 time_freq = min(time_freq, MAXFREQ_SCALED);
415 time_freq = max(time_freq, -MAXFREQ_SCALED);
416 }
417
418 if (txc->modes & ADJ_MAXERROR)
419 time_maxerror = txc->maxerror;
420
421 if (txc->modes & ADJ_ESTERROR)
422 time_esterror = txc->esterror;
423
424 if (txc->modes & ADJ_TIMECONST) {
425 time_constant = txc->constant;
426 if (!(time_status & STA_NANO))
427 time_constant += 4;
428 time_constant = min(time_constant, (long)MAXTC);
429 time_constant = max(time_constant, 0l);
430 }
431
432 if (txc->modes & ADJ_TAI && txc->constant > 0)
433 time_tai = txc->constant;
434
435 if (txc->modes & ADJ_OFFSET)
436 ntp_update_offset(txc->offset);
437
438 if (txc->modes & ADJ_TICK)
439 tick_usec = txc->tick;
440
441 if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
442 ntp_update_frequency();
443}
444
445/*
446 * adjtimex mainly allows reading (and writing, if superuser) of
274 * kernel time-keeping variables. used by xntpd. 447 * kernel time-keeping variables. used by xntpd.
275 */ 448 */
276int do_adjtimex(struct timex *txc) 449int do_adjtimex(struct timex *txc)
@@ -291,11 +464,14 @@ int do_adjtimex(struct timex *txc)
291 if (txc->modes && !capable(CAP_SYS_TIME)) 464 if (txc->modes && !capable(CAP_SYS_TIME))
292 return -EPERM; 465 return -EPERM;
293 466
294 /* if the quartz is off by more than 10% something is VERY wrong! */ 467 /*
468 * if the quartz is off by more than 10% then
469 * something is VERY wrong!
470 */
295 if (txc->modes & ADJ_TICK && 471 if (txc->modes & ADJ_TICK &&
296 (txc->tick < 900000/USER_HZ || 472 (txc->tick < 900000/USER_HZ ||
297 txc->tick > 1100000/USER_HZ)) 473 txc->tick > 1100000/USER_HZ))
298 return -EINVAL; 474 return -EINVAL;
299 475
300 if (txc->modes & ADJ_STATUS && time_state != TIME_OK) 476 if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
301 hrtimer_cancel(&leap_timer); 477 hrtimer_cancel(&leap_timer);
@@ -305,7 +481,6 @@ int do_adjtimex(struct timex *txc)
305 481
306 write_seqlock_irq(&xtime_lock); 482 write_seqlock_irq(&xtime_lock);
307 483
308 /* If there are input parameters, then process them */
309 if (txc->modes & ADJ_ADJTIME) { 484 if (txc->modes & ADJ_ADJTIME) {
310 long save_adjust = time_adjust; 485 long save_adjust = time_adjust;
311 486
@@ -315,98 +490,24 @@ int do_adjtimex(struct timex *txc)
315 ntp_update_frequency(); 490 ntp_update_frequency();
316 } 491 }
317 txc->offset = save_adjust; 492 txc->offset = save_adjust;
318 goto adj_done; 493 } else {
319 }
320 if (txc->modes) {
321 long sec;
322
323 if (txc->modes & ADJ_STATUS) {
324 if ((time_status & STA_PLL) &&
325 !(txc->status & STA_PLL)) {
326 time_state = TIME_OK;
327 time_status = STA_UNSYNC;
328 }
329 /* only set allowed bits */
330 time_status &= STA_RONLY;
331 time_status |= txc->status & ~STA_RONLY;
332
333 switch (time_state) {
334 case TIME_OK:
335 start_timer:
336 sec = ts.tv_sec;
337 if (time_status & STA_INS) {
338 time_state = TIME_INS;
339 sec += 86400 - sec % 86400;
340 hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
341 } else if (time_status & STA_DEL) {
342 time_state = TIME_DEL;
343 sec += 86400 - (sec + 1) % 86400;
344 hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
345 }
346 break;
347 case TIME_INS:
348 case TIME_DEL:
349 time_state = TIME_OK;
350 goto start_timer;
351 break;
352 case TIME_WAIT:
353 if (!(time_status & (STA_INS | STA_DEL)))
354 time_state = TIME_OK;
355 break;
356 case TIME_OOP:
357 hrtimer_restart(&leap_timer);
358 break;
359 }
360 }
361
362 if (txc->modes & ADJ_NANO)
363 time_status |= STA_NANO;
364 if (txc->modes & ADJ_MICRO)
365 time_status &= ~STA_NANO;
366
367 if (txc->modes & ADJ_FREQUENCY) {
368 time_freq = (s64)txc->freq * PPM_SCALE;
369 time_freq = min(time_freq, MAXFREQ_SCALED);
370 time_freq = max(time_freq, -MAXFREQ_SCALED);
371 }
372
373 if (txc->modes & ADJ_MAXERROR)
374 time_maxerror = txc->maxerror;
375 if (txc->modes & ADJ_ESTERROR)
376 time_esterror = txc->esterror;
377
378 if (txc->modes & ADJ_TIMECONST) {
379 time_constant = txc->constant;
380 if (!(time_status & STA_NANO))
381 time_constant += 4;
382 time_constant = min(time_constant, (long)MAXTC);
383 time_constant = max(time_constant, 0l);
384 }
385
386 if (txc->modes & ADJ_TAI && txc->constant > 0)
387 time_tai = txc->constant;
388
389 if (txc->modes & ADJ_OFFSET)
390 ntp_update_offset(txc->offset);
391 if (txc->modes & ADJ_TICK)
392 tick_usec = txc->tick;
393 494
394 if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET)) 495 /* If there are input parameters, then process them: */
395 ntp_update_frequency(); 496 if (txc->modes)
396 } 497 process_adjtimex_modes(txc, &ts);
397 498
398 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ, 499 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
399 NTP_SCALE_SHIFT); 500 NTP_SCALE_SHIFT);
400 if (!(time_status & STA_NANO)) 501 if (!(time_status & STA_NANO))
401 txc->offset /= NSEC_PER_USEC; 502 txc->offset /= NSEC_PER_USEC;
503 }
402 504
403adj_done:
404 result = time_state; /* mostly `TIME_OK' */ 505 result = time_state; /* mostly `TIME_OK' */
405 if (time_status & (STA_UNSYNC|STA_CLOCKERR)) 506 if (time_status & (STA_UNSYNC|STA_CLOCKERR))
406 result = TIME_ERROR; 507 result = TIME_ERROR;
407 508
408 txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) * 509 txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
409 (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT); 510 PPM_SCALE_INV, NTP_SCALE_SHIFT);
410 txc->maxerror = time_maxerror; 511 txc->maxerror = time_maxerror;
411 txc->esterror = time_esterror; 512 txc->esterror = time_esterror;
412 txc->status = time_status; 513 txc->status = time_status;
@@ -425,6 +526,7 @@ adj_done:
425 txc->calcnt = 0; 526 txc->calcnt = 0;
426 txc->errcnt = 0; 527 txc->errcnt = 0;
427 txc->stbcnt = 0; 528 txc->stbcnt = 0;
529
428 write_sequnlock_irq(&xtime_lock); 530 write_sequnlock_irq(&xtime_lock);
429 531
430 txc->time.tv_sec = ts.tv_sec; 532 txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ adj_done:
440static int __init ntp_tick_adj_setup(char *str) 542static int __init ntp_tick_adj_setup(char *str)
441{ 543{
442 ntp_tick_adj = simple_strtol(str, NULL, 0); 544 ntp_tick_adj = simple_strtol(str, NULL, 0);
545 ntp_tick_adj <<= NTP_SCALE_SHIFT;
546
443 return 1; 547 return 1;
444} 548}
445 549
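From userspace these paths are driven through adjtimex(2). An illustrative root-only call (hypothetical values) that lands in process_adjtimex_modes() above:

#include <sys/timex.h>
#include <stdio.h>

int main(void)
{
	struct timex tx = { 0 };
	int state;

	tx.modes = ADJ_FREQUENCY;
	tx.freq  = 50 << 16;		/* +50 ppm; freq is ppm in 16-bit fixed point */

	state = adjtimex(&tx);		/* returns TIME_OK/TIME_INS/... or -1 */
	if (state == -1) {
		perror("adjtimex");
		return 1;
	}
	printf("state=%d freq=%ld offset=%ld\n", state, tx.freq, tx.offset);
	return 0;
}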
diff --git a/kernel/time/timecompare.c b/kernel/time/timecompare.c
new file mode 100644
index 000000000000..71e7f1a19156
--- /dev/null
+++ b/kernel/time/timecompare.c
@@ -0,0 +1,191 @@
1/*
2 * Copyright (C) 2009 Intel Corporation.
3 * Author: Patrick Ohly <patrick.ohly@intel.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20#include <linux/timecompare.h>
21#include <linux/module.h>
22#include <linux/math64.h>
23
24/*
25 * fixed point arithmetic scale factor for skew
26 *
27 * Usually one would measure skew in ppb (parts per billion, 1e9), but
28 * using a factor of 2 simplifies the math.
29 */
30#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
31
32ktime_t timecompare_transform(struct timecompare *sync,
33 u64 source_tstamp)
34{
35 u64 nsec;
36
37 nsec = source_tstamp + sync->offset;
38 nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
39 TIMECOMPARE_SKEW_RESOLUTION;
40
41 return ns_to_ktime(nsec);
42}
43EXPORT_SYMBOL(timecompare_transform);
44
45int timecompare_offset(struct timecompare *sync,
46 s64 *offset,
47 u64 *source_tstamp)
48{
49 u64 start_source = 0, end_source = 0;
50 struct {
51 s64 offset;
52 s64 duration_target;
53 } buffer[10], sample, *samples;
54 int counter = 0, i;
55 int used;
56 int index;
57 int num_samples = sync->num_samples;
58
59 if (num_samples > sizeof(buffer)/sizeof(buffer[0])) {
60 samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
61 if (!samples) {
62 samples = buffer;
63 num_samples = sizeof(buffer)/sizeof(buffer[0]);
64 }
65 } else {
66 samples = buffer;
67 }
68
69 /* run until we have enough valid samples, but do not try forever */
70 i = 0;
71 counter = 0;
72 while (1) {
73 u64 ts;
74 ktime_t start, end;
75
76 start = sync->target();
77 ts = timecounter_read(sync->source);
78 end = sync->target();
79
80 if (!i)
81 start_source = ts;
82
83 /* ignore negative durations */
84 sample.duration_target = ktime_to_ns(ktime_sub(end, start));
85 if (sample.duration_target >= 0) {
86 /*
87 * assume symmetric delay to and from source:
88 * average target time corresponds to measured
89 * source time
90 */
91 sample.offset =
92 ktime_to_ns(ktime_add(end, start)) / 2 -
93 ts;
94
95 /* simple insertion sort based on duration */
96 index = counter - 1;
97 while (index >= 0) {
98 if (samples[index].duration_target <
99 sample.duration_target)
100 break;
101 samples[index + 1] = samples[index];
102 index--;
103 }
104 samples[index + 1] = sample;
105 counter++;
106 }
107
108 i++;
109 if (counter >= num_samples || i >= 100000) {
110 end_source = ts;
111 break;
112 }
113 }
114
115 *source_tstamp = (end_source + start_source) / 2;
116
117 /* remove outliers by only using 75% of the samples */
118 used = counter * 3 / 4;
119 if (!used)
120 used = counter;
121 if (used) {
122 /* calculate average */
123 s64 off = 0;
124 for (index = 0; index < used; index++)
125 off += samples[index].offset;
126 *offset = div_s64(off, used);
127 }
128
129 if (samples && samples != buffer)
130 kfree(samples);
131
132 return used;
133}
134EXPORT_SYMBOL(timecompare_offset);
135
136void __timecompare_update(struct timecompare *sync,
137 u64 source_tstamp)
138{
139 s64 offset;
140 u64 average_time;
141
142 if (!timecompare_offset(sync, &offset, &average_time))
143 return;
144
145 if (!sync->last_update) {
146 sync->last_update = average_time;
147 sync->offset = offset;
148 sync->skew = 0;
149 } else {
150 s64 delta_nsec = average_time - sync->last_update;
151
152 /* avoid division by negative or small deltas */
153 if (delta_nsec >= 10000) {
154 s64 delta_offset_nsec = offset - sync->offset;
155 s64 skew; /* delta_offset_nsec *
156 TIMECOMPARE_SKEW_RESOLUTION /
157 delta_nsec */
158 u64 divisor;
159
160 /* div_s64() is limited to 32 bit divisor */
161 skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
162 divisor = delta_nsec;
163 while (unlikely(divisor >= ((s64)1) << 32)) {
164 /* divide both by 2; beware, right shift
165 of negative value has undefined
166 behavior and can only be used for
167 the positive divisor */
168 skew = div_s64(skew, 2);
169 divisor >>= 1;
170 }
171 skew = div_s64(skew, divisor);
172
173 /*
174 * Calculate new overall skew as 4/16 the
175 * old value and 12/16 the new one. This is
176 * a rather arbitrary tradeoff between
177 * only using the latest measurement (0/16 and
178 * 16/16) and even more weight on past measurements.
179 */
180#define TIMECOMPARE_NEW_SKEW_PER_16 12
181 sync->skew =
182 div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
183 sync->skew +
184 TIMECOMPARE_NEW_SKEW_PER_16 * skew,
185 16);
186 sync->last_update = average_time;
187 sync->offset = offset;
188 }
189 }
190}
191EXPORT_SYMBOL(__timecompare_update);
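A sketch of how a driver would consume this, mirroring how a NIC with a hardware clock might use it; the timecounter is assumed to be set up as in the clocksource.c hunk above:

static struct timecompare sync = {
	.source		= &nic_tc,		/* timecounter over the device clock */
	.target		= ktime_get_real,	/* system clock to correlate against */
	.num_samples	= 10,
};

/* resync periodically, passing a fresh device timestamp ... */
__timecompare_update(&sync, hw_tstamp);

/* ... then translate device timestamps into system time */
ktime_t sys_stamp = timecompare_transform(&sync, hw_tstamp);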
diff --git a/kernel/timer.c b/kernel/timer.c
index 13dd64fe143d..b4555568b4e4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)
491 debug_object_free(timer, &timer_debug_descr); 491 debug_object_free(timer, &timer_debug_descr);
492} 492}
493 493
494static void __init_timer(struct timer_list *timer); 494static void __init_timer(struct timer_list *timer,
495 const char *name,
496 struct lock_class_key *key);
495 497
496void init_timer_on_stack(struct timer_list *timer) 498void init_timer_on_stack_key(struct timer_list *timer,
499 const char *name,
500 struct lock_class_key *key)
497{ 501{
498 debug_object_init_on_stack(timer, &timer_debug_descr); 502 debug_object_init_on_stack(timer, &timer_debug_descr);
499 __init_timer(timer); 503 __init_timer(timer, name, key);
500} 504}
501EXPORT_SYMBOL_GPL(init_timer_on_stack); 505EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
502 506
503void destroy_timer_on_stack(struct timer_list *timer) 507void destroy_timer_on_stack(struct timer_list *timer)
504{ 508{
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
512static inline void debug_timer_deactivate(struct timer_list *timer) { } 516static inline void debug_timer_deactivate(struct timer_list *timer) { }
513#endif 517#endif
514 518
515static void __init_timer(struct timer_list *timer) 519static void __init_timer(struct timer_list *timer,
520 const char *name,
521 struct lock_class_key *key)
516{ 522{
517 timer->entry.next = NULL; 523 timer->entry.next = NULL;
518 timer->base = __raw_get_cpu_var(tvec_bases); 524 timer->base = __raw_get_cpu_var(tvec_bases);
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer)
521 timer->start_pid = -1; 527 timer->start_pid = -1;
522 memset(timer->start_comm, 0, TASK_COMM_LEN); 528 memset(timer->start_comm, 0, TASK_COMM_LEN);
523#endif 529#endif
530 lockdep_init_map(&timer->lockdep_map, name, key, 0);
524} 531}
525 532
526/** 533/**
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer)
530 * init_timer() must be done to a timer prior calling *any* of the 537 * init_timer() must be done to a timer prior calling *any* of the
531 * other timer functions. 538 * other timer functions.
532 */ 539 */
533void init_timer(struct timer_list *timer) 540void init_timer_key(struct timer_list *timer,
541 const char *name,
542 struct lock_class_key *key)
534{ 543{
535 debug_timer_init(timer); 544 debug_timer_init(timer);
536 __init_timer(timer); 545 __init_timer(timer, name, key);
537} 546}
538EXPORT_SYMBOL(init_timer); 547EXPORT_SYMBOL(init_timer_key);
539 548
540void init_timer_deferrable(struct timer_list *timer) 549void init_timer_deferrable_key(struct timer_list *timer,
550 const char *name,
551 struct lock_class_key *key)
541{ 552{
542 init_timer(timer); 553 init_timer_key(timer, name, key);
543 timer_set_deferrable(timer); 554 timer_set_deferrable(timer);
544} 555}
545EXPORT_SYMBOL(init_timer_deferrable); 556EXPORT_SYMBOL(init_timer_deferrable_key);
546 557
547static inline void detach_timer(struct timer_list *timer, 558static inline void detach_timer(struct timer_list *timer,
548 int clear_pending) 559 int clear_pending)
@@ -589,11 +600,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
589 } 600 }
590} 601}
591 602
592int __mod_timer(struct timer_list *timer, unsigned long expires) 603static inline int
604__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
593{ 605{
594 struct tvec_base *base, *new_base; 606 struct tvec_base *base, *new_base;
595 unsigned long flags; 607 unsigned long flags;
596 int ret = 0; 608 int ret;
609
610 ret = 0;
597 611
598 timer_stats_timer_set_start_info(timer); 612 timer_stats_timer_set_start_info(timer);
599 BUG_ON(!timer->function); 613 BUG_ON(!timer->function);
@@ -603,6 +617,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
603 if (timer_pending(timer)) { 617 if (timer_pending(timer)) {
604 detach_timer(timer, 0); 618 detach_timer(timer, 0);
605 ret = 1; 619 ret = 1;
620 } else {
621 if (pending_only)
622 goto out_unlock;
606 } 623 }
607 624
608 debug_timer_activate(timer); 625 debug_timer_activate(timer);
@@ -629,42 +646,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
629 646
630 timer->expires = expires; 647 timer->expires = expires;
631 internal_add_timer(base, timer); 648 internal_add_timer(base, timer);
649
650out_unlock:
632 spin_unlock_irqrestore(&base->lock, flags); 651 spin_unlock_irqrestore(&base->lock, flags);
633 652
634 return ret; 653 return ret;
635} 654}
636 655
637EXPORT_SYMBOL(__mod_timer);
638
639/** 656/**
640 * add_timer_on - start a timer on a particular CPU 657 * mod_timer_pending - modify a pending timer's timeout
641 * @timer: the timer to be added 658 * @timer: the pending timer to be modified
642 * @cpu: the CPU to start it on 659 * @expires: new timeout in jiffies
643 * 660 *
644 * This is not very scalable on SMP. Double adds are not possible. 661 * mod_timer_pending() is the same for pending timers as mod_timer(),
662 * but will not re-activate and modify already deleted timers.
663 *
664 * It is useful for unserialized use of timers.
645 */ 665 */
646void add_timer_on(struct timer_list *timer, int cpu) 666int mod_timer_pending(struct timer_list *timer, unsigned long expires)
647{ 667{
648 struct tvec_base *base = per_cpu(tvec_bases, cpu); 668 return __mod_timer(timer, expires, true);
649 unsigned long flags;
650
651 timer_stats_timer_set_start_info(timer);
652 BUG_ON(timer_pending(timer) || !timer->function);
653 spin_lock_irqsave(&base->lock, flags);
654 timer_set_base(timer, base);
655 debug_timer_activate(timer);
656 internal_add_timer(base, timer);
657 /*
658 * Check whether the other CPU is idle and needs to be
659 * triggered to reevaluate the timer wheel when nohz is
660 * active. We are protected against the other CPU fiddling
661 * with the timer by holding the timer base lock. This also
662 * makes sure that a CPU on the way to idle can not evaluate
663 * the timer wheel.
664 */
665 wake_up_idle_cpu(cpu);
666 spin_unlock_irqrestore(&base->lock, flags);
667} 669}
670EXPORT_SYMBOL(mod_timer_pending);
668 671
669/** 672/**
670 * mod_timer - modify a timer's timeout 673 * mod_timer - modify a timer's timeout
@@ -688,9 +691,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
688 */ 691 */
689int mod_timer(struct timer_list *timer, unsigned long expires) 692int mod_timer(struct timer_list *timer, unsigned long expires)
690{ 693{
691 BUG_ON(!timer->function);
692
693 timer_stats_timer_set_start_info(timer);
694 /* 694 /*
695 * This is a common optimization triggered by the 695 * This is a common optimization triggered by the
696 * networking code - if the timer is re-modified 696 * networking code - if the timer is re-modified
@@ -699,12 +699,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
699 if (timer->expires == expires && timer_pending(timer)) 699 if (timer->expires == expires && timer_pending(timer))
700 return 1; 700 return 1;
701 701
702 return __mod_timer(timer, expires); 702 return __mod_timer(timer, expires, false);
703} 703}
704
705EXPORT_SYMBOL(mod_timer); 704EXPORT_SYMBOL(mod_timer);
706 705
707/** 706/**
707 * add_timer - start a timer
708 * @timer: the timer to be added
709 *
710 * The kernel will do a ->function(->data) callback from the
711 * timer interrupt at the ->expires point in the future. The
712 * current time is 'jiffies'.
713 *
714 * The timer's ->expires, ->function (and if the handler uses it, ->data)
715 * fields must be set prior calling this function.
716 *
717 * Timers with an ->expires field in the past will be executed in the next
718 * timer tick.
719 */
720void add_timer(struct timer_list *timer)
721{
722 BUG_ON(timer_pending(timer));
723 mod_timer(timer, timer->expires);
724}
725EXPORT_SYMBOL(add_timer);
726
727/**
728 * add_timer_on - start a timer on a particular CPU
729 * @timer: the timer to be added
730 * @cpu: the CPU to start it on
731 *
732 * This is not very scalable on SMP. Double adds are not possible.
733 */
734void add_timer_on(struct timer_list *timer, int cpu)
735{
736 struct tvec_base *base = per_cpu(tvec_bases, cpu);
737 unsigned long flags;
738
739 timer_stats_timer_set_start_info(timer);
740 BUG_ON(timer_pending(timer) || !timer->function);
741 spin_lock_irqsave(&base->lock, flags);
742 timer_set_base(timer, base);
743 debug_timer_activate(timer);
744 internal_add_timer(base, timer);
745 /*
746 * Check whether the other CPU is idle and needs to be
747 * triggered to reevaluate the timer wheel when nohz is
748 * active. We are protected against the other CPU fiddling
749 * with the timer by holding the timer base lock. This also
750 * makes sure that a CPU on the way to idle can not evaluate
751 * the timer wheel.
752 */
753 wake_up_idle_cpu(cpu);
754 spin_unlock_irqrestore(&base->lock, flags);
755}
756
757/**
708 * del_timer - deactivate a timer. 758 * del_timer - deactivate a timer.
709 * @timer: the timer to be deactivated 759 * @timer: the timer to be deactivated
710 * 760 *
@@ -733,7 +783,6 @@ int del_timer(struct timer_list *timer)
733 783
734 return ret; 784 return ret;
735} 785}
736
737EXPORT_SYMBOL(del_timer); 786EXPORT_SYMBOL(del_timer);
738 787
739#ifdef CONFIG_SMP 788#ifdef CONFIG_SMP
@@ -767,7 +816,6 @@ out:
767 816
768 return ret; 817 return ret;
769} 818}
770
771EXPORT_SYMBOL(try_to_del_timer_sync); 819EXPORT_SYMBOL(try_to_del_timer_sync);
772 820
773/** 821/**
@@ -789,6 +837,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
789 */ 837 */
790int del_timer_sync(struct timer_list *timer) 838int del_timer_sync(struct timer_list *timer)
791{ 839{
840#ifdef CONFIG_LOCKDEP
841 unsigned long flags;
842
843 local_irq_save(flags);
844 lock_map_acquire(&timer->lockdep_map);
845 lock_map_release(&timer->lockdep_map);
846 local_irq_restore(flags);
847#endif
848
792 for (;;) { 849 for (;;) {
793 int ret = try_to_del_timer_sync(timer); 850 int ret = try_to_del_timer_sync(timer);
794 if (ret >= 0) 851 if (ret >= 0)
@@ -796,7 +853,6 @@ int del_timer_sync(struct timer_list *timer)
796 cpu_relax(); 853 cpu_relax();
797 } 854 }
798} 855}
799
800EXPORT_SYMBOL(del_timer_sync); 856EXPORT_SYMBOL(del_timer_sync);
801#endif 857#endif
802 858
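The dummy acquire/release pairs del_timer_sync() with the lock_map_acquire() now wrapped around the callback in __run_timers() (further down), so lockdep reports the classic self-deadlock even when it never triggers at runtime. A sketch of the bug it catches (lock, timer and functions hypothetical):

static DEFINE_SPINLOCK(mylock);
static struct timer_list mytimer;

static void mytimer_fn(unsigned long data)
{
	spin_lock(&mylock);		/* the callback takes mylock ... */
	/* ... */
	spin_unlock(&mylock);
}

static void teardown(void)
{
	spin_lock(&mylock);
	del_timer_sync(&mytimer);	/* ... while we wait for it holding
					 * mylock: deadlock, now flagged */
	spin_unlock(&mylock);
}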
@@ -861,10 +917,36 @@ static inline void __run_timers(struct tvec_base *base)
861 917
862 set_running_timer(base, timer); 918 set_running_timer(base, timer);
863 detach_timer(timer, 1); 919 detach_timer(timer, 1);
920
864 spin_unlock_irq(&base->lock); 921 spin_unlock_irq(&base->lock);
865 { 922 {
866 int preempt_count = preempt_count(); 923 int preempt_count = preempt_count();
924
925#ifdef CONFIG_LOCKDEP
926 /*
927 * It is permissible to free the timer from
928 * inside the function that is called from
929 * it, this we need to take into account for
930 * lockdep too. To avoid bogus "held lock
931 * freed" warnings as well as problems when
932 * looking into timer->lockdep_map, make a
933 * copy and use that here.
934 */
935 struct lockdep_map lockdep_map =
936 timer->lockdep_map;
937#endif
938 /*
939 * Couple the lock chain with the lock chain at
940 * del_timer_sync() by acquiring the lock_map
941 * around the fn() call here and in
942 * del_timer_sync().
943 */
944 lock_map_acquire(&lockdep_map);
945
867 fn(data); 946 fn(data);
947
948 lock_map_release(&lockdep_map);
949
868 if (preempt_count != preempt_count()) { 950 if (preempt_count != preempt_count()) {
869 printk(KERN_ERR "huh, entered %p " 951 printk(KERN_ERR "huh, entered %p "
870 "with preempt_count %08x, exited" 952 "with preempt_count %08x, exited"
@@ -1268,7 +1350,7 @@ signed long __sched schedule_timeout(signed long timeout)
1268 expire = timeout + jiffies; 1350 expire = timeout + jiffies;
1269 1351
1270 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); 1352 setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
1271 __mod_timer(&timer, expire); 1353 __mod_timer(&timer, expire, false);
1272 schedule(); 1354 schedule();
1273 del_singleshot_timer_sync(&timer); 1355 del_singleshot_timer_sync(&timer);
1274 1356
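The practical difference of the new entry point: mod_timer() (re)activates the timer unconditionally, while mod_timer_pending() returns 0 and does nothing once the timer has been deleted, closing rearm-after-delete races in unserialized users. Sketch (the connection object is hypothetical):

/* another CPU may run del_timer() concurrently with this rearm */
if (mod_timer_pending(&conn->idle_timer, jiffies + HZ))
	pr_debug("idle timeout pushed back\n");
else
	pr_debug("timer already deleted; not resurrected\n");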
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 34e707e5ab87..504086ab4443 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -72,11 +72,10 @@ config FUNCTION_GRAPH_TRACER
72 help 72 help
73 Enable the kernel to trace a function at both its return 73 Enable the kernel to trace a function at both its return
74 and its entry. 74 and its entry.
75 It's first purpose is to trace the duration of functions and 75 Its first purpose is to trace the duration of functions and
76 draw a call graph for each thread with some informations like 76 draw a call graph for each thread with some information like
77 the return value. 77 the return value. This is done by setting the current return
78 This is done by setting the current return address on the current 78 address on the current task structure into a stack of calls.
79 task structure into a stack of calls.
80 79
81config IRQSOFF_TRACER 80config IRQSOFF_TRACER
82 bool "Interrupts-off Latency Tracer" 81 bool "Interrupts-off Latency Tracer"
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fdf913dfc7e8..53e8c8bc0c98 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1908,7 +1908,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
1908} 1908}
1909 1909
1910/** 1910/**
1911 * unregister_ftrace_function - unresgister a function for profiling. 1911 * unregister_ftrace_function - unregister a function for profiling.
1912 * @ops - ops structure that holds the function to unregister 1912 * @ops - ops structure that holds the function to unregister
1913 * 1913 *
1914 * Unregister a function that was added to be called by ftrace profiling. 1914 * Unregister a function that was added to be called by ftrace profiling.
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 930c08e5b38e..dce71a5b51bc 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -42,6 +42,81 @@ static struct tracer_flags tracer_flags = {
42/* pid on the last trace processed */ 42/* pid on the last trace processed */
43static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; 43static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 };
44 44
45/* Add a function return address to the trace stack on thread info.*/
46int
47ftrace_push_return_trace(unsigned long ret, unsigned long long time,
48 unsigned long func, int *depth)
49{
50 int index;
51
52 if (!current->ret_stack)
53 return -EBUSY;
54
55 /* The return trace stack is full */
56 if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
57 atomic_inc(&current->trace_overrun);
58 return -EBUSY;
59 }
60
61 index = ++current->curr_ret_stack;
62 barrier();
63 current->ret_stack[index].ret = ret;
64 current->ret_stack[index].func = func;
65 current->ret_stack[index].calltime = time;
66 *depth = index;
67
68 return 0;
69}
70
71/* Retrieve a function return address from the trace stack on thread info. */
72void
73ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
74{
75 int index;
76
77 index = current->curr_ret_stack;
78
79 if (unlikely(index < 0)) {
80 ftrace_graph_stop();
81 WARN_ON(1);
82 /* Might as well panic, otherwise we have nowhere to go */
83 *ret = (unsigned long)panic;
84 return;
85 }
86
87 *ret = current->ret_stack[index].ret;
88 trace->func = current->ret_stack[index].func;
89 trace->calltime = current->ret_stack[index].calltime;
90 trace->overrun = atomic_read(&current->trace_overrun);
91 trace->depth = index;
92 barrier();
93 current->curr_ret_stack--;
94
95}
96
97/*
98 * Send the trace to the ring-buffer.
99 * @return the original return address.
100 */
101unsigned long ftrace_return_to_handler(void)
102{
103 struct ftrace_graph_ret trace;
104 unsigned long ret;
105
106 ftrace_pop_return_trace(&trace, &ret);
107 trace.rettime = cpu_clock(raw_smp_processor_id());
108 ftrace_graph_return(&trace);
109
110 if (unlikely(!ret)) {
111 ftrace_graph_stop();
112 WARN_ON(1);
113 /* Might as well panic. What else to do? */
114 ret = (unsigned long)panic;
115 }
116
117 return ret;
118}
119
45static int graph_trace_init(struct trace_array *tr) 120static int graph_trace_init(struct trace_array *tr)
46{ 121{
47 int cpu, ret; 122 int cpu, ret;
diff --git a/kernel/user.c b/kernel/user.c
index fbb300e6191f..850e0ba41c1e 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -20,7 +20,7 @@
20 20
21struct user_namespace init_user_ns = { 21struct user_namespace init_user_ns = {
22 .kref = { 22 .kref = {
23 .refcount = ATOMIC_INIT(1), 23 .refcount = ATOMIC_INIT(2),
24 }, 24 },
25 .creator = &root_user, 25 .creator = &root_user,
26}; 26};
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 3b34b3545936..92359cc747a7 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -37,7 +37,7 @@ static void put_uts(ctl_table *table, int write, void *which)
37 up_write(&uts_sem); 37 up_write(&uts_sem);
38} 38}
39 39
40#ifdef CONFIG_PROC_FS 40#ifdef CONFIG_PROC_SYSCTL
41/* 41/*
42 * Special case of dostring for the UTS structure. This has locks 42 * Special case of dostring for the UTS structure. This has locks
43 * to observe. Should this be in kernel/sys.c ???? 43 * to observe. Should this be in kernel/sys.c ????
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509b40d3..32f8e0d2bf5a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -48,8 +48,6 @@ struct cpu_workqueue_struct {
48 48
49 struct workqueue_struct *wq; 49 struct workqueue_struct *wq;
50 struct task_struct *thread; 50 struct task_struct *thread;
51
52 int run_depth; /* Detect run_workqueue() recursion depth */
53} ____cacheline_aligned; 51} ____cacheline_aligned;
54 52
55/* 53/*
@@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on);
262static void run_workqueue(struct cpu_workqueue_struct *cwq) 260static void run_workqueue(struct cpu_workqueue_struct *cwq)
263{ 261{
264 spin_lock_irq(&cwq->lock); 262 spin_lock_irq(&cwq->lock);
265 cwq->run_depth++;
266 if (cwq->run_depth > 3) {
267 /* morton gets to eat his hat */
268 printk("%s: recursion depth exceeded: %d\n",
269 __func__, cwq->run_depth);
270 dump_stack();
271 }
272 while (!list_empty(&cwq->worklist)) { 263 while (!list_empty(&cwq->worklist)) {
273 struct work_struct *work = list_entry(cwq->worklist.next, 264 struct work_struct *work = list_entry(cwq->worklist.next,
274 struct work_struct, entry); 265 struct work_struct, entry);
@@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
311 spin_lock_irq(&cwq->lock); 302 spin_lock_irq(&cwq->lock);
312 cwq->current_work = NULL; 303 cwq->current_work = NULL;
313 } 304 }
314 cwq->run_depth--;
315 spin_unlock_irq(&cwq->lock); 305 spin_unlock_irq(&cwq->lock);
316} 306}
317 307
@@ -368,29 +358,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
368 358
369static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) 359static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
370{ 360{
371 int active; 361 int active = 0;
362 struct wq_barrier barr;
372 363
373 if (cwq->thread == current) { 364 WARN_ON(cwq->thread == current);
374 /*
375 * Probably keventd trying to flush its own queue. So simply run
376 * it by hand rather than deadlocking.
377 */
378 run_workqueue(cwq);
379 active = 1;
380 } else {
381 struct wq_barrier barr;
382 365
383 active = 0; 366 spin_lock_irq(&cwq->lock);
384 spin_lock_irq(&cwq->lock); 367 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
385 if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { 368 insert_wq_barrier(cwq, &barr, &cwq->worklist);
386 insert_wq_barrier(cwq, &barr, &cwq->worklist); 369 active = 1;
387 active = 1;
388 }
389 spin_unlock_irq(&cwq->lock);
390
391 if (active)
392 wait_for_completion(&barr.done);
393 } 370 }
371 spin_unlock_irq(&cwq->lock);
372
373 if (active)
374 wait_for_completion(&barr.done);
394 375
395 return active; 376 return active;
396} 377}
@@ -416,7 +397,7 @@ void flush_workqueue(struct workqueue_struct *wq)
416 might_sleep(); 397 might_sleep();
417 lock_map_acquire(&wq->lockdep_map); 398 lock_map_acquire(&wq->lockdep_map);
418 lock_map_release(&wq->lockdep_map); 399 lock_map_release(&wq->lockdep_map);
419 for_each_cpu_mask_nr(cpu, *cpu_map) 400 for_each_cpu(cpu, cpu_map)
420 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu)); 401 flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
421} 402}
422EXPORT_SYMBOL_GPL(flush_workqueue); 403EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +528,7 @@ static void wait_on_work(struct work_struct *work)
547 wq = cwq->wq; 528 wq = cwq->wq;
548 cpu_map = wq_cpu_map(wq); 529 cpu_map = wq_cpu_map(wq);
549 530
550 for_each_cpu_mask_nr(cpu, *cpu_map) 531 for_each_cpu(cpu, cpu_map)
551 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work); 532 wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
552} 533}
553 534
@@ -911,7 +892,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
911 list_del(&wq->list); 892 list_del(&wq->list);
912 spin_unlock(&workqueue_lock); 893 spin_unlock(&workqueue_lock);
913 894
914 for_each_cpu_mask_nr(cpu, *cpu_map) 895 for_each_cpu(cpu, cpu_map)
915 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); 896 cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
916 cpu_maps_update_done(); 897 cpu_maps_update_done();
917 898
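With the self-flush special case gone (now a WARN_ON), the only mechanism left is the barrier work item: queue a sentinel at the tail and sleep until it runs. Conceptually (this mirrors the wq_barrier already in workqueue.c; a sketch, not the exact source):

struct wq_barrier {
	struct work_struct	work;
	struct completion	done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	complete(&barr->done);	/* everything queued before us has run */
}

/* insert_wq_barrier() queues barr->work at the tail of the worklist;
 * the flusher then blocks in wait_for_completion(&barr.done). */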