Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup.c           | 430
-rw-r--r--   kernel/cgroup_debug.c     |   2
-rw-r--r--   kernel/cpuset.c           | 254
-rw-r--r--   kernel/exit.c             | 213
-rw-r--r--   kernel/fork.c             |   7
-rw-r--r--   kernel/kexec.c            |   3
-rw-r--r--   kernel/ns_cgroup.c        |  14
-rw-r--r--   kernel/pid.c              |  33
-rw-r--r--   kernel/pid_namespace.c    |  15
-rw-r--r--   kernel/power/disk.c       |   1
-rw-r--r--   kernel/power/snapshot.c   |   9
-rw-r--r--   kernel/power/swsusp.c     |  18
-rw-r--r--   kernel/printk.c           |  19
-rw-r--r--   kernel/ptrace.c           | 101
-rw-r--r--   kernel/relay.c            |   8
-rw-r--r--   kernel/sched.c            |  23
-rw-r--r--   kernel/signal.c           |  63
-rw-r--r--   kernel/spinlock.c         |  18
-rw-r--r--   kernel/sys.c              |   4
-rw-r--r--   kernel/sysctl.c           |  17
-rw-r--r--   kernel/utsname_sysctl.c   |   2
-rw-r--r--   kernel/workqueue.c        |  41
22 files changed, 845 insertions, 450 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c500ca7239b2..382109b5baeb 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -94,7 +94,6 @@ struct cgroupfs_root {
94 | char release_agent_path[PATH_MAX]; | 94 | char release_agent_path[PATH_MAX]; |
95 | }; | 95 | }; |
96 | 96 | ||
97 | |||
98 | /* | 97 | /* |
99 | * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the | 98 | * The "rootnode" hierarchy is the "dummy hierarchy", reserved for the |
100 | * subsystems that are otherwise unattached - it never has more than a | 99 | * subsystems that are otherwise unattached - it never has more than a |
@@ -102,6 +101,39 @@ struct cgroupfs_root {
102 | */ | 101 | */ |
103 | static struct cgroupfs_root rootnode; | 102 | static struct cgroupfs_root rootnode; |
104 | 103 | ||
104 | /* | ||
105 | * CSS ID -- ID per subsys's Cgroup Subsys State(CSS). used only when | ||
106 | * cgroup_subsys->use_id != 0. | ||
107 | */ | ||
108 | #define CSS_ID_MAX (65535) | ||
109 | struct css_id { | ||
110 | /* | ||
111 | * The css to which this ID points. This pointer is set to valid value | ||
112 | * after cgroup is populated. If cgroup is removed, this will be NULL. | ||
113 | * This pointer is expected to be RCU-safe because destroy() | ||
114 | * is called after synchronize_rcu(). But for safe use, css_is_removed() | ||
115 | * css_tryget() should be used for avoiding race. | ||
116 | */ | ||
117 | struct cgroup_subsys_state *css; | ||
118 | /* | ||
119 | * ID of this css. | ||
120 | */ | ||
121 | unsigned short id; | ||
122 | /* | ||
123 | * Depth in hierarchy which this ID belongs to. | ||
124 | */ | ||
125 | unsigned short depth; | ||
126 | /* | ||
127 | * ID is freed by RCU. (and lookup routine is RCU safe.) | ||
128 | */ | ||
129 | struct rcu_head rcu_head; | ||
130 | /* | ||
131 | * Hierarchy of CSS ID belongs to. | ||
132 | */ | ||
133 | unsigned short stack[0]; /* Array of Length (depth+1) */ | ||
134 | }; | ||
135 | |||
136 | |||
105 | /* The list of hierarchy roots */ | 137 | /* The list of hierarchy roots */ |
106 | 138 | ||
107 | static LIST_HEAD(roots); | 139 | static LIST_HEAD(roots); |
@@ -185,6 +217,8 @@ struct cg_cgroup_link {
185 | static struct css_set init_css_set; | 217 | static struct css_set init_css_set; |
186 | static struct cg_cgroup_link init_css_set_link; | 218 | static struct cg_cgroup_link init_css_set_link; |
187 | 219 | ||
220 | static int cgroup_subsys_init_idr(struct cgroup_subsys *ss); | ||
221 | |||
188 | /* css_set_lock protects the list of css_set objects, and the | 222 | /* css_set_lock protects the list of css_set objects, and the |
189 | * chain of tasks off each css_set. Nests outside task->alloc_lock | 223 | * chain of tasks off each css_set. Nests outside task->alloc_lock |
190 | * due to cgroup_iter_start() */ | 224 | * due to cgroup_iter_start() */ |
@@ -567,6 +601,9 @@ static struct backing_dev_info cgroup_backing_dev_info = {
567 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, | 601 | .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK, |
568 | }; | 602 | }; |
569 | 603 | ||
604 | static int alloc_css_id(struct cgroup_subsys *ss, | ||
605 | struct cgroup *parent, struct cgroup *child); | ||
606 | |||
570 | static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) | 607 | static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) |
571 | { | 608 | { |
572 | struct inode *inode = new_inode(sb); | 609 | struct inode *inode = new_inode(sb); |
@@ -585,13 +622,18 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
585 | * Call subsys's pre_destroy handler. | 622 | * Call subsys's pre_destroy handler. |
586 | * This is called before css refcnt check. | 623 | * This is called before css refcnt check. |
587 | */ | 624 | */ |
588 | static void cgroup_call_pre_destroy(struct cgroup *cgrp) | 625 | static int cgroup_call_pre_destroy(struct cgroup *cgrp) |
589 | { | 626 | { |
590 | struct cgroup_subsys *ss; | 627 | struct cgroup_subsys *ss; |
628 | int ret = 0; | ||
629 | |||
591 | for_each_subsys(cgrp->root, ss) | 630 | for_each_subsys(cgrp->root, ss) |
592 | if (ss->pre_destroy) | 631 | if (ss->pre_destroy) { |
593 | ss->pre_destroy(ss, cgrp); | 632 | ret = ss->pre_destroy(ss, cgrp); |
594 | return; | 633 | if (ret) |
634 | break; | ||
635 | } | ||
636 | return ret; | ||
595 | } | 637 | } |
596 | 638 | ||
597 | static void free_cgroup_rcu(struct rcu_head *obj) | 639 | static void free_cgroup_rcu(struct rcu_head *obj) |
@@ -685,6 +727,22 @@ static void cgroup_d_remove_dir(struct dentry *dentry)
685 | remove_dir(dentry); | 727 | remove_dir(dentry); |
686 | } | 728 | } |
687 | 729 | ||
730 | /* | ||
731 | * A queue for waiters to do rmdir() cgroup. A tasks will sleep when | ||
732 | * cgroup->count == 0 && list_empty(&cgroup->children) && subsys has some | ||
733 | * reference to css->refcnt. In general, this refcnt is expected to goes down | ||
734 | * to zero, soon. | ||
735 | * | ||
736 | * CGRP_WAIT_ON_RMDIR flag is modified under cgroup's inode->i_mutex; | ||
737 | */ | ||
738 | DECLARE_WAIT_QUEUE_HEAD(cgroup_rmdir_waitq); | ||
739 | |||
740 | static void cgroup_wakeup_rmdir_waiters(const struct cgroup *cgrp) | ||
741 | { | ||
742 | if (unlikely(test_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags))) | ||
743 | wake_up_all(&cgroup_rmdir_waitq); | ||
744 | } | ||
745 | |||
688 | static int rebind_subsystems(struct cgroupfs_root *root, | 746 | static int rebind_subsystems(struct cgroupfs_root *root, |
689 | unsigned long final_bits) | 747 | unsigned long final_bits) |
690 | { | 748 | { |
@@ -857,16 +915,16 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
857 | } | 915 | } |
858 | 916 | ||
859 | ret = rebind_subsystems(root, opts.subsys_bits); | 917 | ret = rebind_subsystems(root, opts.subsys_bits); |
918 | if (ret) | ||
919 | goto out_unlock; | ||
860 | 920 | ||
861 | /* (re)populate subsystem files */ | 921 | /* (re)populate subsystem files */ |
862 | if (!ret) | 922 | cgroup_populate_dir(cgrp); |
863 | cgroup_populate_dir(cgrp); | ||
864 | 923 | ||
865 | if (opts.release_agent) | 924 | if (opts.release_agent) |
866 | strcpy(root->release_agent_path, opts.release_agent); | 925 | strcpy(root->release_agent_path, opts.release_agent); |
867 | out_unlock: | 926 | out_unlock: |
868 | if (opts.release_agent) | 927 | kfree(opts.release_agent); |
869 | kfree(opts.release_agent); | ||
870 | mutex_unlock(&cgroup_mutex); | 928 | mutex_unlock(&cgroup_mutex); |
871 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); | 929 | mutex_unlock(&cgrp->dentry->d_inode->i_mutex); |
872 | return ret; | 930 | return ret; |
@@ -969,15 +1027,13 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
969 | /* First find the desired set of subsystems */ | 1027 | /* First find the desired set of subsystems */ |
970 | ret = parse_cgroupfs_options(data, &opts); | 1028 | ret = parse_cgroupfs_options(data, &opts); |
971 | if (ret) { | 1029 | if (ret) { |
972 | if (opts.release_agent) | 1030 | kfree(opts.release_agent); |
973 | kfree(opts.release_agent); | ||
974 | return ret; | 1031 | return ret; |
975 | } | 1032 | } |
976 | 1033 | ||
977 | root = kzalloc(sizeof(*root), GFP_KERNEL); | 1034 | root = kzalloc(sizeof(*root), GFP_KERNEL); |
978 | if (!root) { | 1035 | if (!root) { |
979 | if (opts.release_agent) | 1036 | kfree(opts.release_agent); |
980 | kfree(opts.release_agent); | ||
981 | return -ENOMEM; | 1037 | return -ENOMEM; |
982 | } | 1038 | } |
983 | 1039 | ||
@@ -1280,6 +1336,12 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
1280 | set_bit(CGRP_RELEASABLE, &oldcgrp->flags); | 1336 | set_bit(CGRP_RELEASABLE, &oldcgrp->flags); |
1281 | synchronize_rcu(); | 1337 | synchronize_rcu(); |
1282 | put_css_set(cg); | 1338 | put_css_set(cg); |
1339 | |||
1340 | /* | ||
1341 | * wake up rmdir() waiter. the rmdir should fail since the cgroup | ||
1342 | * is no longer empty. | ||
1343 | */ | ||
1344 | cgroup_wakeup_rmdir_waiters(cgrp); | ||
1283 | return 0; | 1345 | return 0; |
1284 | } | 1346 | } |
1285 | 1347 | ||
@@ -1625,7 +1687,7 @@ static struct inode_operations cgroup_dir_inode_operations = {
1625 | .rename = cgroup_rename, | 1687 | .rename = cgroup_rename, |
1626 | }; | 1688 | }; |
1627 | 1689 | ||
1628 | static int cgroup_create_file(struct dentry *dentry, int mode, | 1690 | static int cgroup_create_file(struct dentry *dentry, mode_t mode, |
1629 | struct super_block *sb) | 1691 | struct super_block *sb) |
1630 | { | 1692 | { |
1631 | static const struct dentry_operations cgroup_dops = { | 1693 | static const struct dentry_operations cgroup_dops = { |
@@ -1671,7 +1733,7 @@ static int cgroup_create_file(struct dentry *dentry, int mode,
1671 | * @mode: mode to set on new directory. | 1733 | * @mode: mode to set on new directory. |
1672 | */ | 1734 | */ |
1673 | static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, | 1735 | static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry, |
1674 | int mode) | 1736 | mode_t mode) |
1675 | { | 1737 | { |
1676 | struct dentry *parent; | 1738 | struct dentry *parent; |
1677 | int error = 0; | 1739 | int error = 0; |
@@ -1689,6 +1751,33 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
1689 | return error; | 1751 | return error; |
1690 | } | 1752 | } |
1691 | 1753 | ||
1754 | /** | ||
1755 | * cgroup_file_mode - deduce file mode of a control file | ||
1756 | * @cft: the control file in question | ||
1757 | * | ||
1758 | * returns cft->mode if ->mode is not 0 | ||
1759 | * returns S_IRUGO|S_IWUSR if it has both a read and a write handler | ||
1760 | * returns S_IRUGO if it has only a read handler | ||
1761 | * returns S_IWUSR if it has only a write hander | ||
1762 | */ | ||
1763 | static mode_t cgroup_file_mode(const struct cftype *cft) | ||
1764 | { | ||
1765 | mode_t mode = 0; | ||
1766 | |||
1767 | if (cft->mode) | ||
1768 | return cft->mode; | ||
1769 | |||
1770 | if (cft->read || cft->read_u64 || cft->read_s64 || | ||
1771 | cft->read_map || cft->read_seq_string) | ||
1772 | mode |= S_IRUGO; | ||
1773 | |||
1774 | if (cft->write || cft->write_u64 || cft->write_s64 || | ||
1775 | cft->write_string || cft->trigger) | ||
1776 | mode |= S_IWUSR; | ||
1777 | |||
1778 | return mode; | ||
1779 | } | ||
1780 | |||
1692 | int cgroup_add_file(struct cgroup *cgrp, | 1781 | int cgroup_add_file(struct cgroup *cgrp, |
1693 | struct cgroup_subsys *subsys, | 1782 | struct cgroup_subsys *subsys, |
1694 | const struct cftype *cft) | 1783 | const struct cftype *cft) |
@@ -1696,6 +1785,7 @@ int cgroup_add_file(struct cgroup *cgrp,
1696 | struct dentry *dir = cgrp->dentry; | 1785 | struct dentry *dir = cgrp->dentry; |
1697 | struct dentry *dentry; | 1786 | struct dentry *dentry; |
1698 | int error; | 1787 | int error; |
1788 | mode_t mode; | ||
1699 | 1789 | ||
1700 | char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; | 1790 | char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 }; |
1701 | if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { | 1791 | if (subsys && !test_bit(ROOT_NOPREFIX, &cgrp->root->flags)) { |
@@ -1706,7 +1796,8 @@ int cgroup_add_file(struct cgroup *cgrp,
1706 | BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); | 1796 | BUG_ON(!mutex_is_locked(&dir->d_inode->i_mutex)); |
1707 | dentry = lookup_one_len(name, dir, strlen(name)); | 1797 | dentry = lookup_one_len(name, dir, strlen(name)); |
1708 | if (!IS_ERR(dentry)) { | 1798 | if (!IS_ERR(dentry)) { |
1709 | error = cgroup_create_file(dentry, 0644 | S_IFREG, | 1799 | mode = cgroup_file_mode(cft); |
1800 | error = cgroup_create_file(dentry, mode | S_IFREG, | ||
1710 | cgrp->root->sb); | 1801 | cgrp->root->sb); |
1711 | if (!error) | 1802 | if (!error) |
1712 | dentry->d_fsdata = (void *)cft; | 1803 | dentry->d_fsdata = (void *)cft; |
@@ -2288,6 +2379,7 @@ static struct cftype files[] = {
2288 | .write_u64 = cgroup_tasks_write, | 2379 | .write_u64 = cgroup_tasks_write, |
2289 | .release = cgroup_tasks_release, | 2380 | .release = cgroup_tasks_release, |
2290 | .private = FILE_TASKLIST, | 2381 | .private = FILE_TASKLIST, |
2382 | .mode = S_IRUGO | S_IWUSR, | ||
2291 | }, | 2383 | }, |
2292 | 2384 | ||
2293 | { | 2385 | { |
@@ -2327,6 +2419,17 @@ static int cgroup_populate_dir(struct cgroup *cgrp)
2327 | if (ss->populate && (err = ss->populate(ss, cgrp)) < 0) | 2419 | if (ss->populate && (err = ss->populate(ss, cgrp)) < 0) |
2328 | return err; | 2420 | return err; |
2329 | } | 2421 | } |
2422 | /* This cgroup is ready now */ | ||
2423 | for_each_subsys(cgrp->root, ss) { | ||
2424 | struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id]; | ||
2425 | /* | ||
2426 | * Update id->css pointer and make this css visible from | ||
2427 | * CSS ID functions. This pointer will be dereferened | ||
2428 | * from RCU-read-side without locks. | ||
2429 | */ | ||
2430 | if (css->id) | ||
2431 | rcu_assign_pointer(css->id->css, css); | ||
2432 | } | ||
2330 | 2433 | ||
2331 | return 0; | 2434 | return 0; |
2332 | } | 2435 | } |
@@ -2338,6 +2441,7 @@ static void init_cgroup_css(struct cgroup_subsys_state *css,
2338 | css->cgroup = cgrp; | 2441 | css->cgroup = cgrp; |
2339 | atomic_set(&css->refcnt, 1); | 2442 | atomic_set(&css->refcnt, 1); |
2340 | css->flags = 0; | 2443 | css->flags = 0; |
2444 | css->id = NULL; | ||
2341 | if (cgrp == dummytop) | 2445 | if (cgrp == dummytop) |
2342 | set_bit(CSS_ROOT, &css->flags); | 2446 | set_bit(CSS_ROOT, &css->flags); |
2343 | BUG_ON(cgrp->subsys[ss->subsys_id]); | 2447 | BUG_ON(cgrp->subsys[ss->subsys_id]); |
@@ -2376,7 +2480,7 @@ static void cgroup_unlock_hierarchy(struct cgroupfs_root *root)
2376 | * Must be called with the mutex on the parent inode held | 2480 | * Must be called with the mutex on the parent inode held |
2377 | */ | 2481 | */ |
2378 | static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | 2482 | static long cgroup_create(struct cgroup *parent, struct dentry *dentry, |
2379 | int mode) | 2483 | mode_t mode) |
2380 | { | 2484 | { |
2381 | struct cgroup *cgrp; | 2485 | struct cgroup *cgrp; |
2382 | struct cgroupfs_root *root = parent->root; | 2486 | struct cgroupfs_root *root = parent->root; |
@@ -2413,6 +2517,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
2413 | goto err_destroy; | 2517 | goto err_destroy; |
2414 | } | 2518 | } |
2415 | init_cgroup_css(css, ss, cgrp); | 2519 | init_cgroup_css(css, ss, cgrp); |
2520 | if (ss->use_id) | ||
2521 | if (alloc_css_id(ss, parent, cgrp)) | ||
2522 | goto err_destroy; | ||
2523 | /* At error, ->destroy() callback has to free assigned ID. */ | ||
2416 | } | 2524 | } |
2417 | 2525 | ||
2418 | cgroup_lock_hierarchy(root); | 2526 | cgroup_lock_hierarchy(root); |
@@ -2555,9 +2663,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2555 | struct cgroup *cgrp = dentry->d_fsdata; | 2663 | struct cgroup *cgrp = dentry->d_fsdata; |
2556 | struct dentry *d; | 2664 | struct dentry *d; |
2557 | struct cgroup *parent; | 2665 | struct cgroup *parent; |
2666 | DEFINE_WAIT(wait); | ||
2667 | int ret; | ||
2558 | 2668 | ||
2559 | /* the vfs holds both inode->i_mutex already */ | 2669 | /* the vfs holds both inode->i_mutex already */ |
2560 | 2670 | again: | |
2561 | mutex_lock(&cgroup_mutex); | 2671 | mutex_lock(&cgroup_mutex); |
2562 | if (atomic_read(&cgrp->count) != 0) { | 2672 | if (atomic_read(&cgrp->count) != 0) { |
2563 | mutex_unlock(&cgroup_mutex); | 2673 | mutex_unlock(&cgroup_mutex); |
@@ -2573,17 +2683,39 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
2573 | * Call pre_destroy handlers of subsys. Notify subsystems | 2683 | * Call pre_destroy handlers of subsys. Notify subsystems |
2574 | * that rmdir() request comes. | 2684 | * that rmdir() request comes. |
2575 | */ | 2685 | */ |
2576 | cgroup_call_pre_destroy(cgrp); | 2686 | ret = cgroup_call_pre_destroy(cgrp); |
2687 | if (ret) | ||
2688 | return ret; | ||
2577 | 2689 | ||
2578 | mutex_lock(&cgroup_mutex); | 2690 | mutex_lock(&cgroup_mutex); |
2579 | parent = cgrp->parent; | 2691 | parent = cgrp->parent; |
2580 | 2692 | if (atomic_read(&cgrp->count) || !list_empty(&cgrp->children)) { | |
2581 | if (atomic_read(&cgrp->count) | ||
2582 | || !list_empty(&cgrp->children) | ||
2583 | || !cgroup_clear_css_refs(cgrp)) { | ||
2584 | mutex_unlock(&cgroup_mutex); | 2693 | mutex_unlock(&cgroup_mutex); |
2585 | return -EBUSY; | 2694 | return -EBUSY; |
2586 | } | 2695 | } |
2696 | /* | ||
2697 | * css_put/get is provided for subsys to grab refcnt to css. In typical | ||
2698 | * case, subsystem has no reference after pre_destroy(). But, under | ||
2699 | * hierarchy management, some *temporal* refcnt can be hold. | ||
2700 | * To avoid returning -EBUSY to a user, waitqueue is used. If subsys | ||
2701 | * is really busy, it should return -EBUSY at pre_destroy(). wake_up | ||
2702 | * is called when css_put() is called and refcnt goes down to 0. | ||
2703 | */ | ||
2704 | set_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
2705 | prepare_to_wait(&cgroup_rmdir_waitq, &wait, TASK_INTERRUPTIBLE); | ||
2706 | |||
2707 | if (!cgroup_clear_css_refs(cgrp)) { | ||
2708 | mutex_unlock(&cgroup_mutex); | ||
2709 | schedule(); | ||
2710 | finish_wait(&cgroup_rmdir_waitq, &wait); | ||
2711 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
2712 | if (signal_pending(current)) | ||
2713 | return -EINTR; | ||
2714 | goto again; | ||
2715 | } | ||
2716 | /* NO css_tryget() can success after here. */ | ||
2717 | finish_wait(&cgroup_rmdir_waitq, &wait); | ||
2718 | clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); | ||
2587 | 2719 | ||
2588 | spin_lock(&release_list_lock); | 2720 | spin_lock(&release_list_lock); |
2589 | set_bit(CGRP_REMOVED, &cgrp->flags); | 2721 | set_bit(CGRP_REMOVED, &cgrp->flags); |
@@ -2708,6 +2840,8 @@ int __init cgroup_init(void)
2708 | struct cgroup_subsys *ss = subsys[i]; | 2840 | struct cgroup_subsys *ss = subsys[i]; |
2709 | if (!ss->early_init) | 2841 | if (!ss->early_init) |
2710 | cgroup_init_subsys(ss); | 2842 | cgroup_init_subsys(ss); |
2843 | if (ss->use_id) | ||
2844 | cgroup_subsys_init_idr(ss); | ||
2711 | } | 2845 | } |
2712 | 2846 | ||
2713 | /* Add init_css_set to the hash table */ | 2847 | /* Add init_css_set to the hash table */ |
@@ -3084,18 +3218,19 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
3084 | } | 3218 | } |
3085 | 3219 | ||
3086 | /** | 3220 | /** |
3087 | * cgroup_is_descendant - see if @cgrp is a descendant of current task's cgrp | 3221 | * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp |
3088 | * @cgrp: the cgroup in question | 3222 | * @cgrp: the cgroup in question |
3223 | * @task: the task in question | ||
3089 | * | 3224 | * |
3090 | * See if @cgrp is a descendant of the current task's cgroup in | 3225 | * See if @cgrp is a descendant of @task's cgroup in the appropriate |
3091 | * the appropriate hierarchy. | 3226 | * hierarchy. |
3092 | * | 3227 | * |
3093 | * If we are sending in dummytop, then presumably we are creating | 3228 | * If we are sending in dummytop, then presumably we are creating |
3094 | * the top cgroup in the subsystem. | 3229 | * the top cgroup in the subsystem. |
3095 | * | 3230 | * |
3096 | * Called only by the ns (nsproxy) cgroup. | 3231 | * Called only by the ns (nsproxy) cgroup. |
3097 | */ | 3232 | */ |
3098 | int cgroup_is_descendant(const struct cgroup *cgrp) | 3233 | int cgroup_is_descendant(const struct cgroup *cgrp, struct task_struct *task) |
3099 | { | 3234 | { |
3100 | int ret; | 3235 | int ret; |
3101 | struct cgroup *target; | 3236 | struct cgroup *target; |
@@ -3105,7 +3240,7 @@ int cgroup_is_descendant(const struct cgroup *cgrp)
3105 | return 1; | 3240 | return 1; |
3106 | 3241 | ||
3107 | get_first_subsys(cgrp, NULL, &subsys_id); | 3242 | get_first_subsys(cgrp, NULL, &subsys_id); |
3108 | target = task_cgroup(current, subsys_id); | 3243 | target = task_cgroup(task, subsys_id); |
3109 | while (cgrp != target && cgrp!= cgrp->top_cgroup) | 3244 | while (cgrp != target && cgrp!= cgrp->top_cgroup) |
3110 | cgrp = cgrp->parent; | 3245 | cgrp = cgrp->parent; |
3111 | ret = (cgrp == target); | 3246 | ret = (cgrp == target); |
@@ -3138,10 +3273,12 @@ void __css_put(struct cgroup_subsys_state *css)
3138 | { | 3273 | { |
3139 | struct cgroup *cgrp = css->cgroup; | 3274 | struct cgroup *cgrp = css->cgroup; |
3140 | rcu_read_lock(); | 3275 | rcu_read_lock(); |
3141 | if ((atomic_dec_return(&css->refcnt) == 1) && | 3276 | if (atomic_dec_return(&css->refcnt) == 1) { |
3142 | notify_on_release(cgrp)) { | 3277 | if (notify_on_release(cgrp)) { |
3143 | set_bit(CGRP_RELEASABLE, &cgrp->flags); | 3278 | set_bit(CGRP_RELEASABLE, &cgrp->flags); |
3144 | check_for_release(cgrp); | 3279 | check_for_release(cgrp); |
3280 | } | ||
3281 | cgroup_wakeup_rmdir_waiters(cgrp); | ||
3145 | } | 3282 | } |
3146 | rcu_read_unlock(); | 3283 | rcu_read_unlock(); |
3147 | } | 3284 | } |
@@ -3241,3 +3378,232 @@ static int __init cgroup_disable(char *str)
3241 | return 1; | 3378 | return 1; |
3242 | } | 3379 | } |
3243 | __setup("cgroup_disable=", cgroup_disable); | 3380 | __setup("cgroup_disable=", cgroup_disable); |
3381 | |||
3382 | /* | ||
3383 | * Functons for CSS ID. | ||
3384 | */ | ||
3385 | |||
3386 | /* | ||
3387 | *To get ID other than 0, this should be called when !cgroup_is_removed(). | ||
3388 | */ | ||
3389 | unsigned short css_id(struct cgroup_subsys_state *css) | ||
3390 | { | ||
3391 | struct css_id *cssid = rcu_dereference(css->id); | ||
3392 | |||
3393 | if (cssid) | ||
3394 | return cssid->id; | ||
3395 | return 0; | ||
3396 | } | ||
3397 | |||
3398 | unsigned short css_depth(struct cgroup_subsys_state *css) | ||
3399 | { | ||
3400 | struct css_id *cssid = rcu_dereference(css->id); | ||
3401 | |||
3402 | if (cssid) | ||
3403 | return cssid->depth; | ||
3404 | return 0; | ||
3405 | } | ||
3406 | |||
3407 | bool css_is_ancestor(struct cgroup_subsys_state *child, | ||
3408 | const struct cgroup_subsys_state *root) | ||
3409 | { | ||
3410 | struct css_id *child_id = rcu_dereference(child->id); | ||
3411 | struct css_id *root_id = rcu_dereference(root->id); | ||
3412 | |||
3413 | if (!child_id || !root_id || (child_id->depth < root_id->depth)) | ||
3414 | return false; | ||
3415 | return child_id->stack[root_id->depth] == root_id->id; | ||
3416 | } | ||
3417 | |||
3418 | static void __free_css_id_cb(struct rcu_head *head) | ||
3419 | { | ||
3420 | struct css_id *id; | ||
3421 | |||
3422 | id = container_of(head, struct css_id, rcu_head); | ||
3423 | kfree(id); | ||
3424 | } | ||
3425 | |||
3426 | void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) | ||
3427 | { | ||
3428 | struct css_id *id = css->id; | ||
3429 | /* When this is called before css_id initialization, id can be NULL */ | ||
3430 | if (!id) | ||
3431 | return; | ||
3432 | |||
3433 | BUG_ON(!ss->use_id); | ||
3434 | |||
3435 | rcu_assign_pointer(id->css, NULL); | ||
3436 | rcu_assign_pointer(css->id, NULL); | ||
3437 | spin_lock(&ss->id_lock); | ||
3438 | idr_remove(&ss->idr, id->id); | ||
3439 | spin_unlock(&ss->id_lock); | ||
3440 | call_rcu(&id->rcu_head, __free_css_id_cb); | ||
3441 | } | ||
3442 | |||
3443 | /* | ||
3444 | * This is called by init or create(). Then, calls to this function are | ||
3445 | * always serialized (By cgroup_mutex() at create()). | ||
3446 | */ | ||
3447 | |||
3448 | static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth) | ||
3449 | { | ||
3450 | struct css_id *newid; | ||
3451 | int myid, error, size; | ||
3452 | |||
3453 | BUG_ON(!ss->use_id); | ||
3454 | |||
3455 | size = sizeof(*newid) + sizeof(unsigned short) * (depth + 1); | ||
3456 | newid = kzalloc(size, GFP_KERNEL); | ||
3457 | if (!newid) | ||
3458 | return ERR_PTR(-ENOMEM); | ||
3459 | /* get id */ | ||
3460 | if (unlikely(!idr_pre_get(&ss->idr, GFP_KERNEL))) { | ||
3461 | error = -ENOMEM; | ||
3462 | goto err_out; | ||
3463 | } | ||
3464 | spin_lock(&ss->id_lock); | ||
3465 | /* Don't use 0. allocates an ID of 1-65535 */ | ||
3466 | error = idr_get_new_above(&ss->idr, newid, 1, &myid); | ||
3467 | spin_unlock(&ss->id_lock); | ||
3468 | |||
3469 | /* Returns error when there are no free spaces for new ID.*/ | ||
3470 | if (error) { | ||
3471 | error = -ENOSPC; | ||
3472 | goto err_out; | ||
3473 | } | ||
3474 | if (myid > CSS_ID_MAX) | ||
3475 | goto remove_idr; | ||
3476 | |||
3477 | newid->id = myid; | ||
3478 | newid->depth = depth; | ||
3479 | return newid; | ||
3480 | remove_idr: | ||
3481 | error = -ENOSPC; | ||
3482 | spin_lock(&ss->id_lock); | ||
3483 | idr_remove(&ss->idr, myid); | ||
3484 | spin_unlock(&ss->id_lock); | ||
3485 | err_out: | ||
3486 | kfree(newid); | ||
3487 | return ERR_PTR(error); | ||
3488 | |||
3489 | } | ||
3490 | |||
3491 | static int __init cgroup_subsys_init_idr(struct cgroup_subsys *ss) | ||
3492 | { | ||
3493 | struct css_id *newid; | ||
3494 | struct cgroup_subsys_state *rootcss; | ||
3495 | |||
3496 | spin_lock_init(&ss->id_lock); | ||
3497 | idr_init(&ss->idr); | ||
3498 | |||
3499 | rootcss = init_css_set.subsys[ss->subsys_id]; | ||
3500 | newid = get_new_cssid(ss, 0); | ||
3501 | if (IS_ERR(newid)) | ||
3502 | return PTR_ERR(newid); | ||
3503 | |||
3504 | newid->stack[0] = newid->id; | ||
3505 | newid->css = rootcss; | ||
3506 | rootcss->id = newid; | ||
3507 | return 0; | ||
3508 | } | ||
3509 | |||
3510 | static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent, | ||
3511 | struct cgroup *child) | ||
3512 | { | ||
3513 | int subsys_id, i, depth = 0; | ||
3514 | struct cgroup_subsys_state *parent_css, *child_css; | ||
3515 | struct css_id *child_id, *parent_id = NULL; | ||
3516 | |||
3517 | subsys_id = ss->subsys_id; | ||
3518 | parent_css = parent->subsys[subsys_id]; | ||
3519 | child_css = child->subsys[subsys_id]; | ||
3520 | depth = css_depth(parent_css) + 1; | ||
3521 | parent_id = parent_css->id; | ||
3522 | |||
3523 | child_id = get_new_cssid(ss, depth); | ||
3524 | if (IS_ERR(child_id)) | ||
3525 | return PTR_ERR(child_id); | ||
3526 | |||
3527 | for (i = 0; i < depth; i++) | ||
3528 | child_id->stack[i] = parent_id->stack[i]; | ||
3529 | child_id->stack[depth] = child_id->id; | ||
3530 | /* | ||
3531 | * child_id->css pointer will be set after this cgroup is available | ||
3532 | * see cgroup_populate_dir() | ||
3533 | */ | ||
3534 | rcu_assign_pointer(child_css->id, child_id); | ||
3535 | |||
3536 | return 0; | ||
3537 | } | ||
3538 | |||
3539 | /** | ||
3540 | * css_lookup - lookup css by id | ||
3541 | * @ss: cgroup subsys to be looked into. | ||
3542 | * @id: the id | ||
3543 | * | ||
3544 | * Returns pointer to cgroup_subsys_state if there is valid one with id. | ||
3545 | * NULL if not. Should be called under rcu_read_lock() | ||
3546 | */ | ||
3547 | struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id) | ||
3548 | { | ||
3549 | struct css_id *cssid = NULL; | ||
3550 | |||
3551 | BUG_ON(!ss->use_id); | ||
3552 | cssid = idr_find(&ss->idr, id); | ||
3553 | |||
3554 | if (unlikely(!cssid)) | ||
3555 | return NULL; | ||
3556 | |||
3557 | return rcu_dereference(cssid->css); | ||
3558 | } | ||
3559 | |||
3560 | /** | ||
3561 | * css_get_next - lookup next cgroup under specified hierarchy. | ||
3562 | * @ss: pointer to subsystem | ||
3563 | * @id: current position of iteration. | ||
3564 | * @root: pointer to css. search tree under this. | ||
3565 | * @foundid: position of found object. | ||
3566 | * | ||
3567 | * Search next css under the specified hierarchy of rootid. Calling under | ||
3568 | * rcu_read_lock() is necessary. Returns NULL if it reaches the end. | ||
3569 | */ | ||
3570 | struct cgroup_subsys_state * | ||
3571 | css_get_next(struct cgroup_subsys *ss, int id, | ||
3572 | struct cgroup_subsys_state *root, int *foundid) | ||
3573 | { | ||
3574 | struct cgroup_subsys_state *ret = NULL; | ||
3575 | struct css_id *tmp; | ||
3576 | int tmpid; | ||
3577 | int rootid = css_id(root); | ||
3578 | int depth = css_depth(root); | ||
3579 | |||
3580 | if (!rootid) | ||
3581 | return NULL; | ||
3582 | |||
3583 | BUG_ON(!ss->use_id); | ||
3584 | /* fill start point for scan */ | ||
3585 | tmpid = id; | ||
3586 | while (1) { | ||
3587 | /* | ||
3588 | * scan next entry from bitmap(tree), tmpid is updated after | ||
3589 | * idr_get_next(). | ||
3590 | */ | ||
3591 | spin_lock(&ss->id_lock); | ||
3592 | tmp = idr_get_next(&ss->idr, &tmpid); | ||
3593 | spin_unlock(&ss->id_lock); | ||
3594 | |||
3595 | if (!tmp) | ||
3596 | break; | ||
3597 | if (tmp->depth >= depth && tmp->stack[depth] == rootid) { | ||
3598 | ret = rcu_dereference(tmp->css); | ||
3599 | if (ret) { | ||
3600 | *foundid = tmpid; | ||
3601 | break; | ||
3602 | } | ||
3603 | } | ||
3604 | /* continue to scan from next id */ | ||
3605 | tmpid = tmpid + 1; | ||
3606 | } | ||
3607 | return ret; | ||
3608 | } | ||
3609 | |||
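The hunks above add the CSS ID interface (css_id(), css_depth(), css_is_ancestor(), css_lookup(), css_get_next(), free_css_id()) but no consumer appears in this kernel/-only diff. As a reading aid, here is a minimal, hypothetical sketch of how a controller might opt in via ->use_id and translate between IDs and css pointers under rcu_read_lock(); "example_subsys", example_subsys_id and both helper functions are invented for illustration and are not part of this commit.

/*
 * Illustrative only -- not part of this commit.  A controller that sets
 * ->use_id gets a css_id allocated by the cgroup core (alloc_css_id() at
 * create time, cgroup_subsys_init_idr() for the root) and can then store
 * the small integer ID instead of a css pointer.
 */
#include <linux/cgroup.h>
#include <linux/rcupdate.h>

struct cgroup_subsys example_subsys = {
	.name		= "example",
	.subsys_id	= example_subsys_id,	/* hypothetical entry in cgroup_subsys.h */
	.use_id		= 1,			/* ask the core to manage css_id for us */
	/* ->create, ->destroy, ->populate etc. omitted */
};

/* Remember a css by its ID; 0 means "no ID yet / css being removed". */
unsigned short example_remember(struct cgroup_subsys_state *css)
{
	return css_id(css);
}

/* Translate the ID back to a css, RCU-safely, and test its hierarchy position. */
void example_visit(unsigned short id, struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = css_lookup(&example_subsys, id);
	if (css && css_is_ancestor(css, root)) {
		/* css lives under root in this hierarchy */
	}
	rcu_read_unlock();
}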
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
index daca6209202d..0c92d797baa6 100644
--- a/kernel/cgroup_debug.c
+++ b/kernel/cgroup_debug.c
@@ -40,9 +40,7 @@ static u64 taskcount_read(struct cgroup *cont, struct cftype *cft)
40 | { | 40 | { |
41 | u64 count; | 41 | u64 count; |
42 | 42 | ||
43 | cgroup_lock(); | ||
44 | count = cgroup_task_count(cont); | 43 | count = cgroup_task_count(cont); |
45 | cgroup_unlock(); | ||
46 | return count; | 44 | return count; |
47 | } | 45 | } |
48 | 46 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f76db9dcaa05..026faccca869 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -128,10 +128,6 @@ static inline struct cpuset *task_cs(struct task_struct *task)
128 | return container_of(task_subsys_state(task, cpuset_subsys_id), | 128 | return container_of(task_subsys_state(task, cpuset_subsys_id), |
129 | struct cpuset, css); | 129 | struct cpuset, css); |
130 | } | 130 | } |
131 | struct cpuset_hotplug_scanner { | ||
132 | struct cgroup_scanner scan; | ||
133 | struct cgroup *to; | ||
134 | }; | ||
135 | 131 | ||
136 | /* bits in struct cpuset flags field */ | 132 | /* bits in struct cpuset flags field */ |
137 | typedef enum { | 133 | typedef enum { |
@@ -521,6 +517,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
521 | return 0; | 517 | return 0; |
522 | } | 518 | } |
523 | 519 | ||
520 | #ifdef CONFIG_SMP | ||
524 | /* | 521 | /* |
525 | * Helper routine for generate_sched_domains(). | 522 | * Helper routine for generate_sched_domains(). |
526 | * Do cpusets a, b have overlapping cpus_allowed masks? | 523 | * Do cpusets a, b have overlapping cpus_allowed masks? |
@@ -815,6 +812,18 @@ static void do_rebuild_sched_domains(struct work_struct *unused)
815 | 812 | ||
816 | put_online_cpus(); | 813 | put_online_cpus(); |
817 | } | 814 | } |
815 | #else /* !CONFIG_SMP */ | ||
816 | static void do_rebuild_sched_domains(struct work_struct *unused) | ||
817 | { | ||
818 | } | ||
819 | |||
820 | static int generate_sched_domains(struct cpumask **domains, | ||
821 | struct sched_domain_attr **attributes) | ||
822 | { | ||
823 | *domains = NULL; | ||
824 | return 1; | ||
825 | } | ||
826 | #endif /* CONFIG_SMP */ | ||
818 | 827 | ||
819 | static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); | 828 | static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains); |
820 | 829 | ||
@@ -1026,101 +1035,70 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
1026 | mutex_unlock(&callback_mutex); | 1035 | mutex_unlock(&callback_mutex); |
1027 | } | 1036 | } |
1028 | 1037 | ||
1038 | /* | ||
1039 | * Rebind task's vmas to cpuset's new mems_allowed, and migrate pages to new | ||
1040 | * nodes if memory_migrate flag is set. Called with cgroup_mutex held. | ||
1041 | */ | ||
1042 | static void cpuset_change_nodemask(struct task_struct *p, | ||
1043 | struct cgroup_scanner *scan) | ||
1044 | { | ||
1045 | struct mm_struct *mm; | ||
1046 | struct cpuset *cs; | ||
1047 | int migrate; | ||
1048 | const nodemask_t *oldmem = scan->data; | ||
1049 | |||
1050 | mm = get_task_mm(p); | ||
1051 | if (!mm) | ||
1052 | return; | ||
1053 | |||
1054 | cs = cgroup_cs(scan->cg); | ||
1055 | migrate = is_memory_migrate(cs); | ||
1056 | |||
1057 | mpol_rebind_mm(mm, &cs->mems_allowed); | ||
1058 | if (migrate) | ||
1059 | cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed); | ||
1060 | mmput(mm); | ||
1061 | } | ||
1062 | |||
1029 | static void *cpuset_being_rebound; | 1063 | static void *cpuset_being_rebound; |
1030 | 1064 | ||
1031 | /** | 1065 | /** |
1032 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. | 1066 | * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset. |
1033 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed | 1067 | * @cs: the cpuset in which each task's mems_allowed mask needs to be changed |
1034 | * @oldmem: old mems_allowed of cpuset cs | 1068 | * @oldmem: old mems_allowed of cpuset cs |
1069 | * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks() | ||
1035 | * | 1070 | * |
1036 | * Called with cgroup_mutex held | 1071 | * Called with cgroup_mutex held |
1037 | * Return 0 if successful, -errno if not. | 1072 | * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0 |
1073 | * if @heap != NULL. | ||
1038 | */ | 1074 | */ |
1039 | static int update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem) | 1075 | static void update_tasks_nodemask(struct cpuset *cs, const nodemask_t *oldmem, |
1076 | struct ptr_heap *heap) | ||
1040 | { | 1077 | { |
1041 | struct task_struct *p; | 1078 | struct cgroup_scanner scan; |
1042 | struct mm_struct **mmarray; | ||
1043 | int i, n, ntasks; | ||
1044 | int migrate; | ||
1045 | int fudge; | ||
1046 | struct cgroup_iter it; | ||
1047 | int retval; | ||
1048 | 1079 | ||
1049 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ | 1080 | cpuset_being_rebound = cs; /* causes mpol_dup() rebind */ |
1050 | 1081 | ||
1051 | fudge = 10; /* spare mmarray[] slots */ | 1082 | scan.cg = cs->css.cgroup; |
1052 | fudge += cpumask_weight(cs->cpus_allowed);/* imagine 1 fork-bomb/cpu */ | 1083 | scan.test_task = NULL; |
1053 | retval = -ENOMEM; | 1084 | scan.process_task = cpuset_change_nodemask; |
1054 | 1085 | scan.heap = heap; | |
1055 | /* | 1086 | scan.data = (nodemask_t *)oldmem; |
1056 | * Allocate mmarray[] to hold mm reference for each task | ||
1057 | * in cpuset cs. Can't kmalloc GFP_KERNEL while holding | ||
1058 | * tasklist_lock. We could use GFP_ATOMIC, but with a | ||
1059 | * few more lines of code, we can retry until we get a big | ||
1060 | * enough mmarray[] w/o using GFP_ATOMIC. | ||
1061 | */ | ||
1062 | while (1) { | ||
1063 | ntasks = cgroup_task_count(cs->css.cgroup); /* guess */ | ||
1064 | ntasks += fudge; | ||
1065 | mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL); | ||
1066 | if (!mmarray) | ||
1067 | goto done; | ||
1068 | read_lock(&tasklist_lock); /* block fork */ | ||
1069 | if (cgroup_task_count(cs->css.cgroup) <= ntasks) | ||
1070 | break; /* got enough */ | ||
1071 | read_unlock(&tasklist_lock); /* try again */ | ||
1072 | kfree(mmarray); | ||
1073 | } | ||
1074 | |||
1075 | n = 0; | ||
1076 | |||
1077 | /* Load up mmarray[] with mm reference for each task in cpuset. */ | ||
1078 | cgroup_iter_start(cs->css.cgroup, &it); | ||
1079 | while ((p = cgroup_iter_next(cs->css.cgroup, &it))) { | ||
1080 | struct mm_struct *mm; | ||
1081 | |||
1082 | if (n >= ntasks) { | ||
1083 | printk(KERN_WARNING | ||
1084 | "Cpuset mempolicy rebind incomplete.\n"); | ||
1085 | break; | ||
1086 | } | ||
1087 | mm = get_task_mm(p); | ||
1088 | if (!mm) | ||
1089 | continue; | ||
1090 | mmarray[n++] = mm; | ||
1091 | } | ||
1092 | cgroup_iter_end(cs->css.cgroup, &it); | ||
1093 | read_unlock(&tasklist_lock); | ||
1094 | 1087 | ||
1095 | /* | 1088 | /* |
1096 | * Now that we've dropped the tasklist spinlock, we can | 1089 | * The mpol_rebind_mm() call takes mmap_sem, which we couldn't |
1097 | * rebind the vma mempolicies of each mm in mmarray[] to their | 1090 | * take while holding tasklist_lock. Forks can happen - the |
1098 | * new cpuset, and release that mm. The mpol_rebind_mm() | 1091 | * mpol_dup() cpuset_being_rebound check will catch such forks, |
1099 | * call takes mmap_sem, which we couldn't take while holding | 1092 | * and rebind their vma mempolicies too. Because we still hold |
1100 | * tasklist_lock. Forks can happen again now - the mpol_dup() | 1093 | * the global cgroup_mutex, we know that no other rebind effort |
1101 | * cpuset_being_rebound check will catch such forks, and rebind | 1094 | * will be contending for the global variable cpuset_being_rebound. |
1102 | * their vma mempolicies too. Because we still hold the global | ||
1103 | * cgroup_mutex, we know that no other rebind effort will | ||
1104 | * be contending for the global variable cpuset_being_rebound. | ||
1105 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() | 1095 | * It's ok if we rebind the same mm twice; mpol_rebind_mm() |
1106 | * is idempotent. Also migrate pages in each mm to new nodes. | 1096 | * is idempotent. Also migrate pages in each mm to new nodes. |
1107 | */ | 1097 | */ |
1108 | migrate = is_memory_migrate(cs); | 1098 | cgroup_scan_tasks(&scan); |
1109 | for (i = 0; i < n; i++) { | ||
1110 | struct mm_struct *mm = mmarray[i]; | ||
1111 | |||
1112 | mpol_rebind_mm(mm, &cs->mems_allowed); | ||
1113 | if (migrate) | ||
1114 | cpuset_migrate_mm(mm, oldmem, &cs->mems_allowed); | ||
1115 | mmput(mm); | ||
1116 | } | ||
1117 | 1099 | ||
1118 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ | 1100 | /* We're done rebinding vmas to this cpuset's new mems_allowed. */ |
1119 | kfree(mmarray); | ||
1120 | cpuset_being_rebound = NULL; | 1101 | cpuset_being_rebound = NULL; |
1121 | retval = 0; | ||
1122 | done: | ||
1123 | return retval; | ||
1124 | } | 1102 | } |
1125 | 1103 | ||
1126 | /* | 1104 | /* |
@@ -1141,6 +1119,7 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1141 | { | 1119 | { |
1142 | nodemask_t oldmem; | 1120 | nodemask_t oldmem; |
1143 | int retval; | 1121 | int retval; |
1122 | struct ptr_heap heap; | ||
1144 | 1123 | ||
1145 | /* | 1124 | /* |
1146 | * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; | 1125 | * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY]; |
@@ -1175,12 +1154,18 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1175 | if (retval < 0) | 1154 | if (retval < 0) |
1176 | goto done; | 1155 | goto done; |
1177 | 1156 | ||
1157 | retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL); | ||
1158 | if (retval < 0) | ||
1159 | goto done; | ||
1160 | |||
1178 | mutex_lock(&callback_mutex); | 1161 | mutex_lock(&callback_mutex); |
1179 | cs->mems_allowed = trialcs->mems_allowed; | 1162 | cs->mems_allowed = trialcs->mems_allowed; |
1180 | cs->mems_generation = cpuset_mems_generation++; | 1163 | cs->mems_generation = cpuset_mems_generation++; |
1181 | mutex_unlock(&callback_mutex); | 1164 | mutex_unlock(&callback_mutex); |
1182 | 1165 | ||
1183 | retval = update_tasks_nodemask(cs, &oldmem); | 1166 | update_tasks_nodemask(cs, &oldmem, &heap); |
1167 | |||
1168 | heap_free(&heap); | ||
1184 | done: | 1169 | done: |
1185 | return retval; | 1170 | return retval; |
1186 | } | 1171 | } |
@@ -1192,8 +1177,10 @@ int current_cpuset_is_being_rebound(void)
1192 | 1177 | ||
1193 | static int update_relax_domain_level(struct cpuset *cs, s64 val) | 1178 | static int update_relax_domain_level(struct cpuset *cs, s64 val) |
1194 | { | 1179 | { |
1180 | #ifdef CONFIG_SMP | ||
1195 | if (val < -1 || val >= SD_LV_MAX) | 1181 | if (val < -1 || val >= SD_LV_MAX) |
1196 | return -EINVAL; | 1182 | return -EINVAL; |
1183 | #endif | ||
1197 | 1184 | ||
1198 | if (val != cs->relax_domain_level) { | 1185 | if (val != cs->relax_domain_level) { |
1199 | cs->relax_domain_level = val; | 1186 | cs->relax_domain_level = val; |
@@ -1355,19 +1342,22 @@ static int cpuset_can_attach(struct cgroup_subsys *ss,
1355 | struct cgroup *cont, struct task_struct *tsk) | 1342 | struct cgroup *cont, struct task_struct *tsk) |
1356 | { | 1343 | { |
1357 | struct cpuset *cs = cgroup_cs(cont); | 1344 | struct cpuset *cs = cgroup_cs(cont); |
1358 | int ret = 0; | ||
1359 | 1345 | ||
1360 | if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) | 1346 | if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) |
1361 | return -ENOSPC; | 1347 | return -ENOSPC; |
1362 | 1348 | ||
1363 | if (tsk->flags & PF_THREAD_BOUND) { | 1349 | /* |
1364 | mutex_lock(&callback_mutex); | 1350 | * Kthreads bound to specific cpus cannot be moved to a new cpuset; we |
1365 | if (!cpumask_equal(&tsk->cpus_allowed, cs->cpus_allowed)) | 1351 | * cannot change their cpu affinity and isolating such threads by their |
1366 | ret = -EINVAL; | 1352 | * set of allowed nodes is unnecessary. Thus, cpusets are not |
1367 | mutex_unlock(&callback_mutex); | 1353 | * applicable for such threads. This prevents checking for success of |
1368 | } | 1354 | * set_cpus_allowed_ptr() on all attached tasks before cpus_allowed may |
1355 | * be changed. | ||
1356 | */ | ||
1357 | if (tsk->flags & PF_THREAD_BOUND) | ||
1358 | return -EINVAL; | ||
1369 | 1359 | ||
1370 | return ret < 0 ? ret : security_task_setscheduler(tsk, 0, NULL); | 1360 | return security_task_setscheduler(tsk, 0, NULL); |
1371 | } | 1361 | } |
1372 | 1362 | ||
1373 | static void cpuset_attach(struct cgroup_subsys *ss, | 1363 | static void cpuset_attach(struct cgroup_subsys *ss, |
@@ -1706,6 +1696,7 @@ static struct cftype files[] = {
1706 | .read_u64 = cpuset_read_u64, | 1696 | .read_u64 = cpuset_read_u64, |
1707 | .write_u64 = cpuset_write_u64, | 1697 | .write_u64 = cpuset_write_u64, |
1708 | .private = FILE_MEMORY_PRESSURE, | 1698 | .private = FILE_MEMORY_PRESSURE, |
1699 | .mode = S_IRUGO, | ||
1709 | }, | 1700 | }, |
1710 | 1701 | ||
1711 | { | 1702 | { |
@@ -1913,10 +1904,9 @@ int __init cpuset_init(void)
1913 | static void cpuset_do_move_task(struct task_struct *tsk, | 1904 | static void cpuset_do_move_task(struct task_struct *tsk, |
1914 | struct cgroup_scanner *scan) | 1905 | struct cgroup_scanner *scan) |
1915 | { | 1906 | { |
1916 | struct cpuset_hotplug_scanner *chsp; | 1907 | struct cgroup *new_cgroup = scan->data; |
1917 | 1908 | ||
1918 | chsp = container_of(scan, struct cpuset_hotplug_scanner, scan); | 1909 | cgroup_attach_task(new_cgroup, tsk); |
1919 | cgroup_attach_task(chsp->to, tsk); | ||
1920 | } | 1910 | } |
1921 | 1911 | ||
1922 | /** | 1912 | /** |
@@ -1932,15 +1922,15 @@ static void cpuset_do_move_task(struct task_struct *tsk,
1932 | */ | 1922 | */ |
1933 | static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) | 1923 | static void move_member_tasks_to_cpuset(struct cpuset *from, struct cpuset *to) |
1934 | { | 1924 | { |
1935 | struct cpuset_hotplug_scanner scan; | 1925 | struct cgroup_scanner scan; |
1936 | 1926 | ||
1937 | scan.scan.cg = from->css.cgroup; | 1927 | scan.cg = from->css.cgroup; |
1938 | scan.scan.test_task = NULL; /* select all tasks in cgroup */ | 1928 | scan.test_task = NULL; /* select all tasks in cgroup */ |
1939 | scan.scan.process_task = cpuset_do_move_task; | 1929 | scan.process_task = cpuset_do_move_task; |
1940 | scan.scan.heap = NULL; | 1930 | scan.heap = NULL; |
1941 | scan.to = to->css.cgroup; | 1931 | scan.data = to->css.cgroup; |
1942 | 1932 | ||
1943 | if (cgroup_scan_tasks(&scan.scan)) | 1933 | if (cgroup_scan_tasks(&scan)) |
1944 | printk(KERN_ERR "move_member_tasks_to_cpuset: " | 1934 | printk(KERN_ERR "move_member_tasks_to_cpuset: " |
1945 | "cgroup_scan_tasks failed\n"); | 1935 | "cgroup_scan_tasks failed\n"); |
1946 | } | 1936 | } |
@@ -2033,7 +2023,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
2033 | remove_tasks_in_empty_cpuset(cp); | 2023 | remove_tasks_in_empty_cpuset(cp); |
2034 | else { | 2024 | else { |
2035 | update_tasks_cpumask(cp, NULL); | 2025 | update_tasks_cpumask(cp, NULL); |
2036 | update_tasks_nodemask(cp, &oldmems); | 2026 | update_tasks_nodemask(cp, &oldmems, NULL); |
2037 | } | 2027 | } |
2038 | } | 2028 | } |
2039 | } | 2029 | } |
@@ -2069,7 +2059,9 @@ static int cpuset_track_online_cpus(struct notifier_block *unused_nb,
2069 | } | 2059 | } |
2070 | 2060 | ||
2071 | cgroup_lock(); | 2061 | cgroup_lock(); |
2062 | mutex_lock(&callback_mutex); | ||
2072 | cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); | 2063 | cpumask_copy(top_cpuset.cpus_allowed, cpu_online_mask); |
2064 | mutex_unlock(&callback_mutex); | ||
2073 | scan_for_empty_cpusets(&top_cpuset); | 2065 | scan_for_empty_cpusets(&top_cpuset); |
2074 | ndoms = generate_sched_domains(&doms, &attr); | 2066 | ndoms = generate_sched_domains(&doms, &attr); |
2075 | cgroup_unlock(); | 2067 | cgroup_unlock(); |
@@ -2092,11 +2084,12 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
2092 | cgroup_lock(); | 2084 | cgroup_lock(); |
2093 | switch (action) { | 2085 | switch (action) { |
2094 | case MEM_ONLINE: | 2086 | case MEM_ONLINE: |
2095 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | ||
2096 | break; | ||
2097 | case MEM_OFFLINE: | 2087 | case MEM_OFFLINE: |
2088 | mutex_lock(&callback_mutex); | ||
2098 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | 2089 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
2099 | scan_for_empty_cpusets(&top_cpuset); | 2090 | mutex_unlock(&callback_mutex); |
2091 | if (action == MEM_OFFLINE) | ||
2092 | scan_for_empty_cpusets(&top_cpuset); | ||
2100 | break; | 2093 | break; |
2101 | default: | 2094 | default: |
2102 | break; | 2095 | break; |
@@ -2206,26 +2199,24 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2206 | } | 2199 | } |
2207 | 2200 | ||
2208 | /** | 2201 | /** |
2209 | * cpuset_zone_allowed_softwall - Can we allocate on zone z's memory node? | 2202 | * cpuset_node_allowed_softwall - Can we allocate on a memory node? |
2210 | * @z: is this zone on an allowed node? | 2203 | * @node: is this an allowed node? |
2211 | * @gfp_mask: memory allocation flags | 2204 | * @gfp_mask: memory allocation flags |
2212 | * | 2205 | * |
2213 | * If we're in interrupt, yes, we can always allocate. If | 2206 | * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is |
2214 | * __GFP_THISNODE is set, yes, we can always allocate. If zone | 2207 | * set, yes, we can always allocate. If node is in our task's mems_allowed, |
2215 | * z's node is in our tasks mems_allowed, yes. If it's not a | 2208 | * yes. If it's not a __GFP_HARDWALL request and this node is in the nearest |
2216 | * __GFP_HARDWALL request and this zone's nodes is in the nearest | 2209 | * hardwalled cpuset ancestor to this task's cpuset, yes. If the task has been |
2217 | * hardwalled cpuset ancestor to this tasks cpuset, yes. | 2210 | * OOM killed and has access to memory reserves as specified by the TIF_MEMDIE |
2218 | * If the task has been OOM killed and has access to memory reserves | 2211 | * flag, yes. |
2219 | * as specified by the TIF_MEMDIE flag, yes. | ||
2220 | * Otherwise, no. | 2212 | * Otherwise, no. |
2221 | * | 2213 | * |
2222 | * If __GFP_HARDWALL is set, cpuset_zone_allowed_softwall() | 2214 | * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to |
2223 | * reduces to cpuset_zone_allowed_hardwall(). Otherwise, | 2215 | * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall() |
2224 | * cpuset_zone_allowed_softwall() might sleep, and might allow a zone | 2216 | * might sleep, and might allow a node from an enclosing cpuset. |
2225 | * from an enclosing cpuset. | ||
2226 | * | 2217 | * |
2227 | * cpuset_zone_allowed_hardwall() only handles the simpler case of | 2218 | * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall |
2228 | * hardwall cpusets, and never sleeps. | 2219 | * cpusets, and never sleeps. |
2229 | * | 2220 | * |
2230 | * The __GFP_THISNODE placement logic is really handled elsewhere, | 2221 | * The __GFP_THISNODE placement logic is really handled elsewhere, |
2231 | * by forcibly using a zonelist starting at a specified node, and by | 2222 | * by forcibly using a zonelist starting at a specified node, and by |
@@ -2264,20 +2255,17 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
2264 | * GFP_USER - only nodes in current tasks mems allowed ok. | 2255 | * GFP_USER - only nodes in current tasks mems allowed ok. |
2265 | * | 2256 | * |
2266 | * Rule: | 2257 | * Rule: |
2267 | * Don't call cpuset_zone_allowed_softwall if you can't sleep, unless you | 2258 | * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you |
2268 | * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables | 2259 | * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables |
2269 | * the code that might scan up ancestor cpusets and sleep. | 2260 | * the code that might scan up ancestor cpusets and sleep. |
2270 | */ | 2261 | */ |
2271 | 2262 | int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) | |
2272 | int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | ||
2273 | { | 2263 | { |
2274 | int node; /* node that zone z is on */ | ||
2275 | const struct cpuset *cs; /* current cpuset ancestors */ | 2264 | const struct cpuset *cs; /* current cpuset ancestors */ |
2276 | int allowed; /* is allocation in zone z allowed? */ | 2265 | int allowed; /* is allocation in zone z allowed? */ |
2277 | 2266 | ||
2278 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) | 2267 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) |
2279 | return 1; | 2268 | return 1; |
2280 | node = zone_to_nid(z); | ||
2281 | might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); | 2269 | might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); |
2282 | if (node_isset(node, current->mems_allowed)) | 2270 | if (node_isset(node, current->mems_allowed)) |
2283 | return 1; | 2271 | return 1; |
@@ -2306,15 +2294,15 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
2306 | } | 2294 | } |
2307 | 2295 | ||
2308 | /* | 2296 | /* |
2309 | * cpuset_zone_allowed_hardwall - Can we allocate on zone z's memory node? | 2297 | * cpuset_node_allowed_hardwall - Can we allocate on a memory node? |
2310 | * @z: is this zone on an allowed node? | 2298 | * @node: is this an allowed node? |
2311 | * @gfp_mask: memory allocation flags | 2299 | * @gfp_mask: memory allocation flags |
2312 | * | 2300 | * |
2313 | * If we're in interrupt, yes, we can always allocate. | 2301 | * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is |
2314 | * If __GFP_THISNODE is set, yes, we can always allocate. If zone | 2302 | * set, yes, we can always allocate. If node is in our task's mems_allowed, |
2315 | * z's node is in our tasks mems_allowed, yes. If the task has been | 2303 | * yes. If the task has been OOM killed and has access to memory reserves as |
2316 | * OOM killed and has access to memory reserves as specified by the | 2304 | * specified by the TIF_MEMDIE flag, yes. |
2317 | * TIF_MEMDIE flag, yes. Otherwise, no. | 2305 | * Otherwise, no. |
2318 | * | 2306 | * |
2319 | * The __GFP_THISNODE placement logic is really handled elsewhere, | 2307 | * The __GFP_THISNODE placement logic is really handled elsewhere, |
2320 | * by forcibly using a zonelist starting at a specified node, and by | 2308 | * by forcibly using a zonelist starting at a specified node, and by |
@@ -2322,20 +2310,16 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
2322 | * any node on the zonelist except the first. By the time any such | 2310 | * any node on the zonelist except the first. By the time any such |
2323 | * calls get to this routine, we should just shut up and say 'yes'. | 2311 | * calls get to this routine, we should just shut up and say 'yes'. |
2324 | * | 2312 | * |
2325 | * Unlike the cpuset_zone_allowed_softwall() variant, above, | 2313 | * Unlike the cpuset_node_allowed_softwall() variant, above, |
2326 | * this variant requires that the zone be in the current tasks | 2314 | * this variant requires that the node be in the current task's |
2327 | * mems_allowed or that we're in interrupt. It does not scan up the | 2315 | * mems_allowed or that we're in interrupt. It does not scan up the |
2328 | * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset. | 2316 | * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset. |
2329 | * It never sleeps. | 2317 | * It never sleeps. |
2330 | */ | 2318 | */ |
2331 | 2319 | int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) | |
2332 | int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) | ||
2333 | { | 2320 | { |
2334 | int node; /* node that zone z is on */ | ||
2335 | |||
2336 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) | 2321 | if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) |
2337 | return 1; | 2322 | return 1; |
2338 | node = zone_to_nid(z); | ||
2339 | if (node_isset(node, current->mems_allowed)) | 2323 | if (node_isset(node, current->mems_allowed)) |
2340 | return 1; | 2324 | return 1; |
2341 | /* | 2325 | /* |
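The final cpuset hunks above turn the zone-based allowed-memory checks into node-based ones: __cpuset_zone_allowed_softwall()/__cpuset_zone_allowed_hardwall() become __cpuset_node_allowed_softwall()/__cpuset_node_allowed_hardwall(), and the zone_to_nid() call moves out of the helpers into the callers. A hedged caller-side sketch follows; the inline wrappers are assumed to be declared in include/linux/cpuset.h (outside this kernel/-limited diffstat), and the surrounding helper is illustrative, not code from this commit.

/* Illustrative only: the caller-side change implied by the rename above. */
#include <linux/cpuset.h>
#include <linux/gfp.h>
#include <linux/mmzone.h>

int example_zone_ok_for_alloc(struct zone *zone, gfp_t gfp_mask)
{
	/* Before: the helper took the zone and derived the node itself. */
	/* return cpuset_zone_allowed_softwall(zone, gfp_mask); */

	/* After: the caller resolves the node id and passes it explicitly. */
	return cpuset_node_allowed_softwall(zone_to_nid(zone), gfp_mask);
}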
diff --git a/kernel/exit.c b/kernel/exit.c
index b5d656845c90..6686ed1e4aa3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -62,11 +62,6 @@ DEFINE_TRACE(sched_process_wait);
62 | 62 | ||
63 | static void exit_mm(struct task_struct * tsk); | 63 | static void exit_mm(struct task_struct * tsk); |
64 | 64 | ||
65 | static inline int task_detached(struct task_struct *p) | ||
66 | { | ||
67 | return p->exit_signal == -1; | ||
68 | } | ||
69 | |||
70 | static void __unhash_process(struct task_struct *p) | 65 | static void __unhash_process(struct task_struct *p) |
71 | { | 66 | { |
72 | nr_threads--; | 67 | nr_threads--; |
@@ -363,16 +358,12 @@ static void reparent_to_kthreadd(void)
363 | void __set_special_pids(struct pid *pid) | 358 | void __set_special_pids(struct pid *pid) |
364 | { | 359 | { |
365 | struct task_struct *curr = current->group_leader; | 360 | struct task_struct *curr = current->group_leader; |
366 | pid_t nr = pid_nr(pid); | ||
367 | 361 | ||
368 | if (task_session(curr) != pid) { | 362 | if (task_session(curr) != pid) |
369 | change_pid(curr, PIDTYPE_SID, pid); | 363 | change_pid(curr, PIDTYPE_SID, pid); |
370 | set_task_session(curr, nr); | 364 | |
371 | } | 365 | if (task_pgrp(curr) != pid) |
372 | if (task_pgrp(curr) != pid) { | ||
373 | change_pid(curr, PIDTYPE_PGID, pid); | 366 | change_pid(curr, PIDTYPE_PGID, pid); |
374 | set_task_pgrp(curr, nr); | ||
375 | } | ||
376 | } | 367 | } |
377 | 368 | ||
378 | static void set_special_pids(struct pid *pid) | 369 | static void set_special_pids(struct pid *pid) |
@@ -704,119 +695,6 @@ static void exit_mm(struct task_struct * tsk)
704 | } | 695 | } |
705 | 696 | ||
706 | /* | 697 | /* |
707 | * Return nonzero if @parent's children should reap themselves. | ||
708 | * | ||
709 | * Called with write_lock_irq(&tasklist_lock) held. | ||
710 | */ | ||
711 | static int ignoring_children(struct task_struct *parent) | ||
712 | { | ||
713 | int ret; | ||
714 | struct sighand_struct *psig = parent->sighand; | ||
715 | unsigned long flags; | ||
716 | spin_lock_irqsave(&psig->siglock, flags); | ||
717 | ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || | ||
718 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT)); | ||
719 | spin_unlock_irqrestore(&psig->siglock, flags); | ||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | /* | ||
724 | * Detach all tasks we were using ptrace on. | ||
725 | * Any that need to be release_task'd are put on the @dead list. | ||
726 | * | ||
727 | * Called with write_lock(&tasklist_lock) held. | ||
728 | */ | ||
729 | static void ptrace_exit(struct task_struct *parent, struct list_head *dead) | ||
730 | { | ||
731 | struct task_struct *p, *n; | ||
732 | int ign = -1; | ||
733 | |||
734 | list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) { | ||
735 | __ptrace_unlink(p); | ||
736 | |||
737 | if (p->exit_state != EXIT_ZOMBIE) | ||
738 | continue; | ||
739 | |||
740 | /* | ||
741 | * If it's a zombie, our attachedness prevented normal | ||
742 | * parent notification or self-reaping. Do notification | ||
743 | * now if it would have happened earlier. If it should | ||
744 | * reap itself, add it to the @dead list. We can't call | ||
745 | * release_task() here because we already hold tasklist_lock. | ||
746 | * | ||
747 | * If it's our own child, there is no notification to do. | ||
748 | * But if our normal children self-reap, then this child | ||
749 | * was prevented by ptrace and we must reap it now. | ||
750 | */ | ||
751 | if (!task_detached(p) && thread_group_empty(p)) { | ||
752 | if (!same_thread_group(p->real_parent, parent)) | ||
753 | do_notify_parent(p, p->exit_signal); | ||
754 | else { | ||
755 | if (ign < 0) | ||
756 | ign = ignoring_children(parent); | ||
757 | if (ign) | ||
758 | p->exit_signal = -1; | ||
759 | } | ||
760 | } | ||
761 | |||
762 | if (task_detached(p)) { | ||
763 | /* | ||
764 | * Mark it as in the process of being reaped. | ||
765 | */ | ||
766 | p->exit_state = EXIT_DEAD; | ||
767 | list_add(&p->ptrace_entry, dead); | ||
768 | } | ||
769 | } | ||
770 | } | ||
771 | |||
772 | /* | ||
773 | * Finish up exit-time ptrace cleanup. | ||
774 | * | ||
775 | * Called without locks. | ||
776 | */ | ||
777 | static void ptrace_exit_finish(struct task_struct *parent, | ||
778 | struct list_head *dead) | ||
779 | { | ||
780 | struct task_struct *p, *n; | ||
781 | |||
782 | BUG_ON(!list_empty(&parent->ptraced)); | ||
783 | |||
784 | list_for_each_entry_safe(p, n, dead, ptrace_entry) { | ||
785 | list_del_init(&p->ptrace_entry); | ||
786 | release_task(p); | ||
787 | } | ||
788 | } | ||
789 | |||
790 | static void reparent_thread(struct task_struct *p, struct task_struct *father) | ||
791 | { | ||
792 | if (p->pdeath_signal) | ||
793 | /* We already hold the tasklist_lock here. */ | ||
794 | group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p); | ||
795 | |||
796 | list_move_tail(&p->sibling, &p->real_parent->children); | ||
797 | |||
798 | /* If this is a threaded reparent there is no need to | ||
799 | * notify anyone anything has happened. | ||
800 | */ | ||
801 | if (same_thread_group(p->real_parent, father)) | ||
802 | return; | ||
803 | |||
804 | /* We don't want people slaying init. */ | ||
805 | if (!task_detached(p)) | ||
806 | p->exit_signal = SIGCHLD; | ||
807 | |||
808 | /* If we'd notified the old parent about this child's death, | ||
809 | * also notify the new parent. | ||
810 | */ | ||
811 | if (!ptrace_reparented(p) && | ||
812 | p->exit_state == EXIT_ZOMBIE && | ||
813 | !task_detached(p) && thread_group_empty(p)) | ||
814 | do_notify_parent(p, p->exit_signal); | ||
815 | |||
816 | kill_orphaned_pgrp(p, father); | ||
817 | } | ||
818 | |||
819 | /* | ||
820 | * When we die, we re-parent all our children. | 698 | * When we die, we re-parent all our children. |
821 | * Try to give them to another thread in our thread | 699 | * Try to give them to another thread in our thread |
822 | * group, and if no such member exists, give it to | 700 | * group, and if no such member exists, give it to |
@@ -855,17 +733,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father) | |||
855 | return pid_ns->child_reaper; | 733 | return pid_ns->child_reaper; |
856 | } | 734 | } |
857 | 735 | ||
736 | /* | ||
737 | * Any that need to be release_task'd are put on the @dead list. | ||
738 | */ | ||
739 | static void reparent_thread(struct task_struct *father, struct task_struct *p, | ||
740 | struct list_head *dead) | ||
741 | { | ||
742 | if (p->pdeath_signal) | ||
743 | group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p); | ||
744 | |||
745 | list_move_tail(&p->sibling, &p->real_parent->children); | ||
746 | |||
747 | if (task_detached(p)) | ||
748 | return; | ||
749 | /* | ||
750 | * If this is a threaded reparent there is no need to | ||
751 | * notify anyone anything has happened. | ||
752 | */ | ||
753 | if (same_thread_group(p->real_parent, father)) | ||
754 | return; | ||
755 | |||
756 | /* We don't want people slaying init. */ | ||
757 | p->exit_signal = SIGCHLD; | ||
758 | |||
759 | /* If it has exited notify the new parent about this child's death. */ | ||
760 | if (!p->ptrace && | ||
761 | p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { | ||
762 | do_notify_parent(p, p->exit_signal); | ||
763 | if (task_detached(p)) { | ||
764 | p->exit_state = EXIT_DEAD; | ||
765 | list_move_tail(&p->sibling, dead); | ||
766 | } | ||
767 | } | ||
768 | |||
769 | kill_orphaned_pgrp(p, father); | ||
770 | } | ||
771 | |||
858 | static void forget_original_parent(struct task_struct *father) | 772 | static void forget_original_parent(struct task_struct *father) |
859 | { | 773 | { |
860 | struct task_struct *p, *n, *reaper; | 774 | struct task_struct *p, *n, *reaper; |
861 | LIST_HEAD(ptrace_dead); | 775 | LIST_HEAD(dead_children); |
776 | |||
777 | exit_ptrace(father); | ||
862 | 778 | ||
863 | write_lock_irq(&tasklist_lock); | 779 | write_lock_irq(&tasklist_lock); |
864 | reaper = find_new_reaper(father); | 780 | reaper = find_new_reaper(father); |
865 | /* | ||
866 | * First clean up ptrace if we were using it. | ||
867 | */ | ||
868 | ptrace_exit(father, &ptrace_dead); | ||
869 | 781 | ||
870 | list_for_each_entry_safe(p, n, &father->children, sibling) { | 782 | list_for_each_entry_safe(p, n, &father->children, sibling) { |
871 | p->real_parent = reaper; | 783 | p->real_parent = reaper; |
@@ -873,13 +785,16 @@ static void forget_original_parent(struct task_struct *father) | |||
873 | BUG_ON(p->ptrace); | 785 | BUG_ON(p->ptrace); |
874 | p->parent = p->real_parent; | 786 | p->parent = p->real_parent; |
875 | } | 787 | } |
876 | reparent_thread(p, father); | 788 | reparent_thread(father, p, &dead_children); |
877 | } | 789 | } |
878 | |||
879 | write_unlock_irq(&tasklist_lock); | 790 | write_unlock_irq(&tasklist_lock); |
791 | |||
880 | BUG_ON(!list_empty(&father->children)); | 792 | BUG_ON(!list_empty(&father->children)); |
881 | 793 | ||
882 | ptrace_exit_finish(father, &ptrace_dead); | 794 | list_for_each_entry_safe(p, n, &dead_children, sibling) { |
795 | list_del_init(&p->sibling); | ||
796 | release_task(p); | ||
797 | } | ||
883 | } | 798 | } |
884 | 799 | ||
885 | /* | 800 | /* |
@@ -1389,6 +1304,18 @@ static int wait_task_zombie(struct task_struct *p, int options, | |||
1389 | return retval; | 1304 | return retval; |
1390 | } | 1305 | } |
1391 | 1306 | ||
1307 | static int *task_stopped_code(struct task_struct *p, bool ptrace) | ||
1308 | { | ||
1309 | if (ptrace) { | ||
1310 | if (task_is_stopped_or_traced(p)) | ||
1311 | return &p->exit_code; | ||
1312 | } else { | ||
1313 | if (p->signal->flags & SIGNAL_STOP_STOPPED) | ||
1314 | return &p->signal->group_exit_code; | ||
1315 | } | ||
1316 | return NULL; | ||
1317 | } | ||
1318 | |||
1392 | /* | 1319 | /* |
1393 | * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold | 1320 | * Handle sys_wait4 work for one task in state TASK_STOPPED. We hold |
1394 | * read_lock(&tasklist_lock) on entry. If we return zero, we still hold | 1321 | * read_lock(&tasklist_lock) on entry. If we return zero, we still hold |
@@ -1399,7 +1326,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, | |||
1399 | int options, struct siginfo __user *infop, | 1326 | int options, struct siginfo __user *infop, |
1400 | int __user *stat_addr, struct rusage __user *ru) | 1327 | int __user *stat_addr, struct rusage __user *ru) |
1401 | { | 1328 | { |
1402 | int retval, exit_code, why; | 1329 | int retval, exit_code, *p_code, why; |
1403 | uid_t uid = 0; /* unneeded, required by compiler */ | 1330 | uid_t uid = 0; /* unneeded, required by compiler */ |
1404 | pid_t pid; | 1331 | pid_t pid; |
1405 | 1332 | ||
@@ -1409,22 +1336,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p, | |||
1409 | exit_code = 0; | 1336 | exit_code = 0; |
1410 | spin_lock_irq(&p->sighand->siglock); | 1337 | spin_lock_irq(&p->sighand->siglock); |
1411 | 1338 | ||
1412 | if (unlikely(!task_is_stopped_or_traced(p))) | 1339 | p_code = task_stopped_code(p, ptrace); |
1413 | goto unlock_sig; | 1340 | if (unlikely(!p_code)) |
1414 | |||
1415 | if (!ptrace && p->signal->group_stop_count > 0) | ||
1416 | /* | ||
1417 | * A group stop is in progress and this is the group leader. | ||
1418 | * We won't report until all threads have stopped. | ||
1419 | */ | ||
1420 | goto unlock_sig; | 1341 | goto unlock_sig; |
1421 | 1342 | ||
1422 | exit_code = p->exit_code; | 1343 | exit_code = *p_code; |
1423 | if (!exit_code) | 1344 | if (!exit_code) |
1424 | goto unlock_sig; | 1345 | goto unlock_sig; |
1425 | 1346 | ||
1426 | if (!unlikely(options & WNOWAIT)) | 1347 | if (!unlikely(options & WNOWAIT)) |
1427 | p->exit_code = 0; | 1348 | *p_code = 0; |
1428 | 1349 | ||
1429 | /* don't need the RCU readlock here as we're holding a spinlock */ | 1350 | /* don't need the RCU readlock here as we're holding a spinlock */ |
1430 | uid = __task_cred(p)->uid; | 1351 | uid = __task_cred(p)->uid; |
@@ -1580,7 +1501,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace, | |||
1580 | */ | 1501 | */ |
1581 | *notask_error = 0; | 1502 | *notask_error = 0; |
1582 | 1503 | ||
1583 | if (task_is_stopped_or_traced(p)) | 1504 | if (task_stopped_code(p, ptrace)) |
1584 | return wait_task_stopped(ptrace, p, options, | 1505 | return wait_task_stopped(ptrace, p, options, |
1585 | infop, stat_addr, ru); | 1506 | infop, stat_addr, ru); |
1586 | 1507 | ||
@@ -1784,7 +1705,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr, | |||
1784 | pid = find_get_pid(-upid); | 1705 | pid = find_get_pid(-upid); |
1785 | } else if (upid == 0) { | 1706 | } else if (upid == 0) { |
1786 | type = PIDTYPE_PGID; | 1707 | type = PIDTYPE_PGID; |
1787 | pid = get_pid(task_pgrp(current)); | 1708 | pid = get_task_pid(current, PIDTYPE_PGID); |
1788 | } else /* upid > 0 */ { | 1709 | } else /* upid > 0 */ { |
1789 | type = PIDTYPE_PID; | 1710 | type = PIDTYPE_PID; |
1790 | pid = find_get_pid(upid); | 1711 | pid = find_get_pid(upid); |
diff --git a/kernel/fork.c b/kernel/fork.c index e82a14577a98..660c2b8765bc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -825,6 +825,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
825 | atomic_set(&sig->live, 1); | 825 | atomic_set(&sig->live, 1); |
826 | init_waitqueue_head(&sig->wait_chldexit); | 826 | init_waitqueue_head(&sig->wait_chldexit); |
827 | sig->flags = 0; | 827 | sig->flags = 0; |
828 | if (clone_flags & CLONE_NEWPID) | ||
829 | sig->flags |= SIGNAL_UNKILLABLE; | ||
828 | sig->group_exit_code = 0; | 830 | sig->group_exit_code = 0; |
829 | sig->group_exit_task = NULL; | 831 | sig->group_exit_task = NULL; |
830 | sig->group_stop_count = 0; | 832 | sig->group_stop_count = 0; |
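The SIGNAL_UNKILLABLE flag set here is what turns the first task of a CLONE_NEWPID clone into the namespace's init. A userspace illustration of the situation this creates, assuming the glibc clone() wrapper and CAP_SYS_ADMIN; container_init() and the stack size are made up for the example.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static char child_stack[64 * 1024];

static int container_init(void *arg)
{
	printf("pid inside the new namespace: %d\n", (int)getpid()); /* 1 */

	/* Dropped at send time: sender is inside the namespace, the handler
	 * is SIG_DFL, and this task's signal_struct is SIGNAL_UNKILLABLE. */
	kill(1, SIGTERM);
	sleep(1);
	return 0;
}

int main(void)
{
	pid_t pid = clone(container_init, child_stack + sizeof(child_stack),
			  CLONE_NEWPID | SIGCHLD, NULL);

	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	return 0;
}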
@@ -1109,7 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1109 | goto bad_fork_cleanup_mm; | 1111 | goto bad_fork_cleanup_mm; |
1110 | if ((retval = copy_io(clone_flags, p))) | 1112 | if ((retval = copy_io(clone_flags, p))) |
1111 | goto bad_fork_cleanup_namespaces; | 1113 | goto bad_fork_cleanup_namespaces; |
1112 | retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs); | 1114 | retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); |
1113 | if (retval) | 1115 | if (retval) |
1114 | goto bad_fork_cleanup_io; | 1116 | goto bad_fork_cleanup_io; |
1115 | 1117 | ||
@@ -1247,8 +1249,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1247 | p->signal->leader_pid = pid; | 1249 | p->signal->leader_pid = pid; |
1248 | tty_kref_put(p->signal->tty); | 1250 | tty_kref_put(p->signal->tty); |
1249 | p->signal->tty = tty_kref_get(current->signal->tty); | 1251 | p->signal->tty = tty_kref_get(current->signal->tty); |
1250 | set_task_pgrp(p, task_pgrp_nr(current)); | ||
1251 | set_task_session(p, task_session_nr(current)); | ||
1252 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); | 1252 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); |
1253 | attach_pid(p, PIDTYPE_SID, task_session(current)); | 1253 | attach_pid(p, PIDTYPE_SID, task_session(current)); |
1254 | list_add_tail_rcu(&p->tasks, &init_task.tasks); | 1254 | list_add_tail_rcu(&p->tasks, &init_task.tasks); |
@@ -1472,6 +1472,7 @@ void __init proc_caches_init(void) | |||
1472 | mm_cachep = kmem_cache_create("mm_struct", | 1472 | mm_cachep = kmem_cache_create("mm_struct", |
1473 | sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, | 1473 | sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, |
1474 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); | 1474 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
1475 | vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC); | ||
1475 | mmap_init(); | 1476 | mmap_init(); |
1476 | } | 1477 | } |
1477 | 1478 | ||
diff --git a/kernel/kexec.c b/kernel/kexec.c index 93eed85fe017..5a758c6e4950 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c | |||
@@ -42,7 +42,7 @@ | |||
42 | note_buf_t* crash_notes; | 42 | note_buf_t* crash_notes; |
43 | 43 | ||
44 | /* vmcoreinfo stuff */ | 44 | /* vmcoreinfo stuff */ |
45 | unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; | 45 | static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES]; |
46 | u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; | 46 | u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4]; |
47 | size_t vmcoreinfo_size; | 47 | size_t vmcoreinfo_size; |
48 | size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); | 48 | size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); |
@@ -1409,6 +1409,7 @@ static int __init crash_save_vmcoreinfo_init(void) | |||
1409 | VMCOREINFO_OFFSET(list_head, prev); | 1409 | VMCOREINFO_OFFSET(list_head, prev); |
1410 | VMCOREINFO_OFFSET(vm_struct, addr); | 1410 | VMCOREINFO_OFFSET(vm_struct, addr); |
1411 | VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); | 1411 | VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); |
1412 | log_buf_kexec_setup(); | ||
1412 | VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); | 1413 | VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); |
1413 | VMCOREINFO_NUMBER(NR_FREE_PAGES); | 1414 | VMCOREINFO_NUMBER(NR_FREE_PAGES); |
1414 | VMCOREINFO_NUMBER(PG_lru); | 1415 | VMCOREINFO_NUMBER(PG_lru); |
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c index 78bc3fdac0d2..5aa854f9e5ae 100644 --- a/kernel/ns_cgroup.c +++ b/kernel/ns_cgroup.c | |||
@@ -34,7 +34,7 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid) | |||
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Rules: | 36 | * Rules: |
37 | * 1. you can only enter a cgroup which is a child of your current | 37 | * 1. you can only enter a cgroup which is a descendant of your current |
38 | * cgroup | 38 | * cgroup |
39 | * 2. you can only place another process into a cgroup if | 39 | * 2. you can only place another process into a cgroup if |
40 | * a. you have CAP_SYS_ADMIN | 40 | * a. you have CAP_SYS_ADMIN |
@@ -45,21 +45,15 @@ int ns_cgroup_clone(struct task_struct *task, struct pid *pid) | |||
45 | static int ns_can_attach(struct cgroup_subsys *ss, | 45 | static int ns_can_attach(struct cgroup_subsys *ss, |
46 | struct cgroup *new_cgroup, struct task_struct *task) | 46 | struct cgroup *new_cgroup, struct task_struct *task) |
47 | { | 47 | { |
48 | struct cgroup *orig; | ||
49 | |||
50 | if (current != task) { | 48 | if (current != task) { |
51 | if (!capable(CAP_SYS_ADMIN)) | 49 | if (!capable(CAP_SYS_ADMIN)) |
52 | return -EPERM; | 50 | return -EPERM; |
53 | 51 | ||
54 | if (!cgroup_is_descendant(new_cgroup)) | 52 | if (!cgroup_is_descendant(new_cgroup, current)) |
55 | return -EPERM; | 53 | return -EPERM; |
56 | } | 54 | } |
57 | 55 | ||
58 | if (atomic_read(&new_cgroup->count) != 0) | 56 | if (!cgroup_is_descendant(new_cgroup, task)) |
59 | return -EPERM; | ||
60 | |||
61 | orig = task_cgroup(task, ns_subsys_id); | ||
62 | if (orig && orig != new_cgroup->parent) | ||
63 | return -EPERM; | 57 | return -EPERM; |
64 | 58 | ||
65 | return 0; | 59 | return 0; |
@@ -77,7 +71,7 @@ static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss, | |||
77 | 71 | ||
78 | if (!capable(CAP_SYS_ADMIN)) | 72 | if (!capable(CAP_SYS_ADMIN)) |
79 | return ERR_PTR(-EPERM); | 73 | return ERR_PTR(-EPERM); |
80 | if (!cgroup_is_descendant(cgroup)) | 74 | if (!cgroup_is_descendant(cgroup, current)) |
81 | return ERR_PTR(-EPERM); | 75 | return ERR_PTR(-EPERM); |
82 | 76 | ||
83 | ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL); | 77 | ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL); |
diff --git a/kernel/pid.c b/kernel/pid.c index 1b3586fe753a..b2e5f78fd281 100644 --- a/kernel/pid.c +++ b/kernel/pid.c | |||
@@ -403,6 +403,8 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type) | |||
403 | { | 403 | { |
404 | struct pid *pid; | 404 | struct pid *pid; |
405 | rcu_read_lock(); | 405 | rcu_read_lock(); |
406 | if (type != PIDTYPE_PID) | ||
407 | task = task->group_leader; | ||
406 | pid = get_pid(task->pids[type].pid); | 408 | pid = get_pid(task->pids[type].pid); |
407 | rcu_read_unlock(); | 409 | rcu_read_unlock(); |
408 | return pid; | 410 | return pid; |
@@ -450,11 +452,24 @@ pid_t pid_vnr(struct pid *pid) | |||
450 | } | 452 | } |
451 | EXPORT_SYMBOL_GPL(pid_vnr); | 453 | EXPORT_SYMBOL_GPL(pid_vnr); |
452 | 454 | ||
453 | pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) | 455 | pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, |
456 | struct pid_namespace *ns) | ||
454 | { | 457 | { |
455 | return pid_nr_ns(task_pid(tsk), ns); | 458 | pid_t nr = 0; |
459 | |||
460 | rcu_read_lock(); | ||
461 | if (!ns) | ||
462 | ns = current->nsproxy->pid_ns; | ||
463 | if (likely(pid_alive(task))) { | ||
464 | if (type != PIDTYPE_PID) | ||
465 | task = task->group_leader; | ||
466 | nr = pid_nr_ns(task->pids[type].pid, ns); | ||
467 | } | ||
468 | rcu_read_unlock(); | ||
469 | |||
470 | return nr; | ||
456 | } | 471 | } |
457 | EXPORT_SYMBOL(task_pid_nr_ns); | 472 | EXPORT_SYMBOL(__task_pid_nr_ns); |
458 | 473 | ||
459 | pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) | 474 | pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) |
460 | { | 475 | { |
@@ -462,18 +477,6 @@ pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) | |||
462 | } | 477 | } |
463 | EXPORT_SYMBOL(task_tgid_nr_ns); | 478 | EXPORT_SYMBOL(task_tgid_nr_ns); |
464 | 479 | ||
465 | pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) | ||
466 | { | ||
467 | return pid_nr_ns(task_pgrp(tsk), ns); | ||
468 | } | ||
469 | EXPORT_SYMBOL(task_pgrp_nr_ns); | ||
470 | |||
471 | pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) | ||
472 | { | ||
473 | return pid_nr_ns(task_session(tsk), ns); | ||
474 | } | ||
475 | EXPORT_SYMBOL(task_session_nr_ns); | ||
476 | |||
477 | struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) | 480 | struct pid_namespace *task_active_pid_ns(struct task_struct *tsk) |
478 | { | 481 | { |
479 | return ns_of_pid(task_pid(tsk)); | 482 | return ns_of_pid(task_pid(tsk)); |
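With __task_pid_nr_ns() taking both the pid type and the namespace, the per-type helpers removed above (task_pgrp_nr_ns, task_session_nr_ns) presumably survive as thin inline wrappers in a header such as <linux/sched.h>; their exact new home is an assumption, but the shape would be:

/* Sketch: the old exported helpers expressed in terms of the new one. */
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
				    struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}

static inline pid_t task_session_nr_ns(struct task_struct *tsk,
				       struct pid_namespace *ns)
{
	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}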
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index fab8ea86fac3..2d1001b4858d 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c | |||
@@ -152,6 +152,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
152 | { | 152 | { |
153 | int nr; | 153 | int nr; |
154 | int rc; | 154 | int rc; |
155 | struct task_struct *task; | ||
155 | 156 | ||
156 | /* | 157 | /* |
157 | * The last thread in the cgroup-init thread group is terminating. | 158 | * The last thread in the cgroup-init thread group is terminating. |
@@ -169,7 +170,19 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns) | |||
169 | read_lock(&tasklist_lock); | 170 | read_lock(&tasklist_lock); |
170 | nr = next_pidmap(pid_ns, 1); | 171 | nr = next_pidmap(pid_ns, 1); |
171 | while (nr > 0) { | 172 | while (nr > 0) { |
172 | kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr); | 173 | rcu_read_lock(); |
174 | |||
175 | /* | ||
176 | * Use force_sig() since it clears SIGNAL_UNKILLABLE, ensuring that | ||
177 | * any nested container's init processes don't ignore the | ||
178 | * signal. | ||
179 | */ | ||
180 | task = pid_task(find_vpid(nr), PIDTYPE_PID); | ||
181 | if (task) | ||
182 | force_sig(SIGKILL, task); | ||
183 | |||
184 | rcu_read_unlock(); | ||
185 | |||
173 | nr = next_pidmap(pid_ns, nr); | 186 | nr = next_pidmap(pid_ns, nr); |
174 | } | 187 | } |
175 | read_unlock(&tasklist_lock); | 188 | read_unlock(&tasklist_lock); |
diff --git a/kernel/power/disk.c b/kernel/power/disk.c index e886d1332a10..f3db382c2b2d 100644 --- a/kernel/power/disk.c +++ b/kernel/power/disk.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/console.h> | 22 | #include <linux/console.h> |
23 | #include <linux/cpu.h> | 23 | #include <linux/cpu.h> |
24 | #include <linux/freezer.h> | 24 | #include <linux/freezer.h> |
25 | #include <asm/suspend.h> | ||
25 | 26 | ||
26 | #include "power.h" | 27 | #include "power.h" |
27 | 28 | ||
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index f5fc2d7680f2..33e2e4a819f9 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -321,13 +321,10 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) | |||
321 | 321 | ||
322 | INIT_LIST_HEAD(list); | 322 | INIT_LIST_HEAD(list); |
323 | 323 | ||
324 | for_each_zone(zone) { | 324 | for_each_populated_zone(zone) { |
325 | unsigned long zone_start, zone_end; | 325 | unsigned long zone_start, zone_end; |
326 | struct mem_extent *ext, *cur, *aux; | 326 | struct mem_extent *ext, *cur, *aux; |
327 | 327 | ||
328 | if (!populated_zone(zone)) | ||
329 | continue; | ||
330 | |||
331 | zone_start = zone->zone_start_pfn; | 328 | zone_start = zone->zone_start_pfn; |
332 | zone_end = zone->zone_start_pfn + zone->spanned_pages; | 329 | zone_end = zone->zone_start_pfn + zone->spanned_pages; |
333 | 330 | ||
@@ -804,8 +801,8 @@ static unsigned int count_free_highmem_pages(void) | |||
804 | struct zone *zone; | 801 | struct zone *zone; |
805 | unsigned int cnt = 0; | 802 | unsigned int cnt = 0; |
806 | 803 | ||
807 | for_each_zone(zone) | 804 | for_each_populated_zone(zone) |
808 | if (populated_zone(zone) && is_highmem(zone)) | 805 | if (is_highmem(zone)) |
809 | cnt += zone_page_state(zone, NR_FREE_PAGES); | 806 | cnt += zone_page_state(zone, NR_FREE_PAGES); |
810 | 807 | ||
811 | return cnt; | 808 | return cnt; |
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c index a92c91451559..78c35047586d 100644 --- a/kernel/power/swsusp.c +++ b/kernel/power/swsusp.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <linux/highmem.h> | 51 | #include <linux/highmem.h> |
52 | #include <linux/time.h> | 52 | #include <linux/time.h> |
53 | #include <linux/rbtree.h> | 53 | #include <linux/rbtree.h> |
54 | #include <linux/io.h> | ||
54 | 55 | ||
55 | #include "power.h" | 56 | #include "power.h" |
56 | 57 | ||
@@ -229,17 +230,16 @@ int swsusp_shrink_memory(void) | |||
229 | size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES; | 230 | size = count_data_pages() + PAGES_FOR_IO + SPARE_PAGES; |
230 | tmp = size; | 231 | tmp = size; |
231 | size += highmem_size; | 232 | size += highmem_size; |
232 | for_each_zone (zone) | 233 | for_each_populated_zone(zone) { |
233 | if (populated_zone(zone)) { | 234 | tmp += snapshot_additional_pages(zone); |
234 | tmp += snapshot_additional_pages(zone); | 235 | if (is_highmem(zone)) { |
235 | if (is_highmem(zone)) { | 236 | highmem_size -= |
236 | highmem_size -= | ||
237 | zone_page_state(zone, NR_FREE_PAGES); | 237 | zone_page_state(zone, NR_FREE_PAGES); |
238 | } else { | 238 | } else { |
239 | tmp -= zone_page_state(zone, NR_FREE_PAGES); | 239 | tmp -= zone_page_state(zone, NR_FREE_PAGES); |
240 | tmp += zone->lowmem_reserve[ZONE_NORMAL]; | 240 | tmp += zone->lowmem_reserve[ZONE_NORMAL]; |
241 | } | ||
242 | } | 241 | } |
242 | } | ||
243 | 243 | ||
244 | if (highmem_size < 0) | 244 | if (highmem_size < 0) |
245 | highmem_size = 0; | 245 | highmem_size = 0; |
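Both the snapshot.c and swsusp.c conversions above lean on for_each_populated_zone() folding the populated_zone() test into the iterator itself. A sketch of the shape such a helper would take (an assumption about <linux/mmzone.h>, not a quote of it):

/* Assumed shape of the iterator: walk every zone but skip empty ones,
 * so callers no longer need an explicit populated_zone() check. */
#define for_each_populated_zone(zone)				\
	for (zone = (first_online_pgdat())->node_zones;		\
	     zone;						\
	     zone = next_zone(zone))				\
		if (!populated_zone(zone))			\
			; /* empty zone, skip */		\
		else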
diff --git a/kernel/printk.c b/kernel/printk.c index e3602d0755b0..a5f61a9acedb 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/security.h> | 32 | #include <linux/security.h> |
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | #include <linux/syscalls.h> | 34 | #include <linux/syscalls.h> |
35 | #include <linux/kexec.h> | ||
35 | 36 | ||
36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
37 | 38 | ||
@@ -135,6 +136,24 @@ static char *log_buf = __log_buf; | |||
135 | static int log_buf_len = __LOG_BUF_LEN; | 136 | static int log_buf_len = __LOG_BUF_LEN; |
136 | static unsigned logged_chars; /* Number of chars produced since last read+clear operation */ | 137 | static unsigned logged_chars; /* Number of chars produced since last read+clear operation */ |
137 | 138 | ||
139 | #ifdef CONFIG_KEXEC | ||
140 | /* | ||
141 | * This appends the listed symbols to /proc/vmcoreinfo | ||
142 | * | ||
143 | * /proc/vmcoreinfo is used by various utilities, like crash and makedumpfile to | ||
144 | * obtain access to symbols that are otherwise very difficult to locate. These | ||
145 | * symbols are specifically used so that utilities can access and extract the | ||
146 | * dmesg log from a vmcore file after a crash. | ||
147 | */ | ||
148 | void log_buf_kexec_setup(void) | ||
149 | { | ||
150 | VMCOREINFO_SYMBOL(log_buf); | ||
151 | VMCOREINFO_SYMBOL(log_end); | ||
152 | VMCOREINFO_SYMBOL(log_buf_len); | ||
153 | VMCOREINFO_SYMBOL(logged_chars); | ||
154 | } | ||
155 | #endif | ||
156 | |||
138 | static int __init log_buf_len_setup(char *str) | 157 | static int __init log_buf_len_setup(char *str) |
139 | { | 158 | { |
140 | unsigned size = memparse(str, &str); | 159 | unsigned size = memparse(str, &str); |
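Each VMCOREINFO_SYMBOL() call in log_buf_kexec_setup() records a symbol name and address in the vmcoreinfo note. A sketch of what the macro presumably expands to (an assumption about <linux/kexec.h>, shown only to make the exported data format concrete):

/* Assumed expansion: append one "SYMBOL(name)=hexaddr" line to the note
 * that crash/makedumpfile later parse to locate the dmesg ring buffer. */
#define VMCOREINFO_SYMBOL(name) \
	vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)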
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index c9cf48b21f05..5105f5a6a2ce 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -60,11 +60,15 @@ static void ptrace_untrace(struct task_struct *child) | |||
60 | { | 60 | { |
61 | spin_lock(&child->sighand->siglock); | 61 | spin_lock(&child->sighand->siglock); |
62 | if (task_is_traced(child)) { | 62 | if (task_is_traced(child)) { |
63 | if (child->signal->flags & SIGNAL_STOP_STOPPED) { | 63 | /* |
64 | * If the group stop is completed or in progress, | ||
65 | * this thread was already counted as stopped. | ||
66 | */ | ||
67 | if (child->signal->flags & SIGNAL_STOP_STOPPED || | ||
68 | child->signal->group_stop_count) | ||
64 | __set_task_state(child, TASK_STOPPED); | 69 | __set_task_state(child, TASK_STOPPED); |
65 | } else { | 70 | else |
66 | signal_wake_up(child, 1); | 71 | signal_wake_up(child, 1); |
67 | } | ||
68 | } | 72 | } |
69 | spin_unlock(&child->sighand->siglock); | 73 | spin_unlock(&child->sighand->siglock); |
70 | } | 74 | } |
@@ -235,18 +239,58 @@ out: | |||
235 | return retval; | 239 | return retval; |
236 | } | 240 | } |
237 | 241 | ||
238 | static inline void __ptrace_detach(struct task_struct *child, unsigned int data) | 242 | /* |
243 | * Called with irqs disabled, returns true if children should reap themselves. | ||
244 | */ | ||
245 | static int ignoring_children(struct sighand_struct *sigh) | ||
239 | { | 246 | { |
240 | child->exit_code = data; | 247 | int ret; |
241 | /* .. re-parent .. */ | 248 | spin_lock(&sigh->siglock); |
242 | __ptrace_unlink(child); | 249 | ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) || |
243 | /* .. and wake it up. */ | 250 | (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT); |
244 | if (child->exit_state != EXIT_ZOMBIE) | 251 | spin_unlock(&sigh->siglock); |
245 | wake_up_process(child); | 252 | return ret; |
253 | } | ||
254 | |||
255 | /* | ||
256 | * Called with tasklist_lock held for writing. | ||
257 | * Unlink a traced task, and clean it up if it was a traced zombie. | ||
258 | * Return true if it needs to be reaped with release_task(). | ||
259 | * (We can't call release_task() here because we already hold tasklist_lock.) | ||
260 | * | ||
261 | * If it's a zombie, our attachedness prevented normal parent notification | ||
262 | * or self-reaping. Do notification now if it would have happened earlier. | ||
263 | * If it should reap itself, return true. | ||
264 | * | ||
265 | * If it's our own child, there is no notification to do. | ||
266 | * But if our normal children self-reap, then this child | ||
267 | * was prevented by ptrace and we must reap it now. | ||
268 | */ | ||
269 | static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | ||
270 | { | ||
271 | __ptrace_unlink(p); | ||
272 | |||
273 | if (p->exit_state == EXIT_ZOMBIE) { | ||
274 | if (!task_detached(p) && thread_group_empty(p)) { | ||
275 | if (!same_thread_group(p->real_parent, tracer)) | ||
276 | do_notify_parent(p, p->exit_signal); | ||
277 | else if (ignoring_children(tracer->sighand)) | ||
278 | p->exit_signal = -1; | ||
279 | } | ||
280 | if (task_detached(p)) { | ||
281 | /* Mark it as in the process of being reaped. */ | ||
282 | p->exit_state = EXIT_DEAD; | ||
283 | return true; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | return false; | ||
246 | } | 288 | } |
247 | 289 | ||
248 | int ptrace_detach(struct task_struct *child, unsigned int data) | 290 | int ptrace_detach(struct task_struct *child, unsigned int data) |
249 | { | 291 | { |
292 | bool dead = false; | ||
293 | |||
250 | if (!valid_signal(data)) | 294 | if (!valid_signal(data)) |
251 | return -EIO; | 295 | return -EIO; |
252 | 296 | ||
@@ -255,14 +299,45 @@ int ptrace_detach(struct task_struct *child, unsigned int data) | |||
255 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 299 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
256 | 300 | ||
257 | write_lock_irq(&tasklist_lock); | 301 | write_lock_irq(&tasklist_lock); |
258 | /* protect against de_thread()->release_task() */ | 302 | /* |
259 | if (child->ptrace) | 303 | * This child can be already killed. Make sure de_thread() or |
260 | __ptrace_detach(child, data); | 304 | * our sub-thread doing do_wait() didn't do release_task() yet. |
305 | */ | ||
306 | if (child->ptrace) { | ||
307 | child->exit_code = data; | ||
308 | dead = __ptrace_detach(current, child); | ||
309 | } | ||
261 | write_unlock_irq(&tasklist_lock); | 310 | write_unlock_irq(&tasklist_lock); |
262 | 311 | ||
312 | if (unlikely(dead)) | ||
313 | release_task(child); | ||
314 | |||
263 | return 0; | 315 | return 0; |
264 | } | 316 | } |
265 | 317 | ||
318 | /* | ||
319 | * Detach all tasks we were using ptrace on. | ||
320 | */ | ||
321 | void exit_ptrace(struct task_struct *tracer) | ||
322 | { | ||
323 | struct task_struct *p, *n; | ||
324 | LIST_HEAD(ptrace_dead); | ||
325 | |||
326 | write_lock_irq(&tasklist_lock); | ||
327 | list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) { | ||
328 | if (__ptrace_detach(tracer, p)) | ||
329 | list_add(&p->ptrace_entry, &ptrace_dead); | ||
330 | } | ||
331 | write_unlock_irq(&tasklist_lock); | ||
332 | |||
333 | BUG_ON(!list_empty(&tracer->ptraced)); | ||
334 | |||
335 | list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) { | ||
336 | list_del_init(&p->ptrace_entry); | ||
337 | release_task(p); | ||
338 | } | ||
339 | } | ||
340 | |||
266 | int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) | 341 | int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len) |
267 | { | 342 | { |
268 | int copied = 0; | 343 | int copied = 0; |
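ignoring_children() reproduces the POSIX rule that a parent which sets SIGCHLD to SIG_IGN or uses SA_NOCLDWAIT asks the kernel to auto-reap its children. A small userspace illustration of that rule (ordinary POSIX behaviour, not kernel code):

#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = SIG_DFL;
	sa.sa_flags = SA_NOCLDWAIT;
	sigaction(SIGCHLD, &sa, NULL);	/* children will self-reap */

	if (fork() == 0)
		_exit(0);		/* leaves no zombie behind */

	sleep(1);
	if (wait(NULL) < 0)
		perror("wait");		/* ECHILD: the child was auto-reaped */
	return 0;
}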
diff --git a/kernel/relay.c b/kernel/relay.c index 8f2179c8056f..e92db8c06acf 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -797,13 +797,15 @@ void relay_subbufs_consumed(struct rchan *chan, | |||
797 | if (!chan) | 797 | if (!chan) |
798 | return; | 798 | return; |
799 | 799 | ||
800 | if (cpu >= NR_CPUS || !chan->buf[cpu]) | 800 | if (cpu >= NR_CPUS || !chan->buf[cpu] || |
801 | subbufs_consumed > chan->n_subbufs) | ||
801 | return; | 802 | return; |
802 | 803 | ||
803 | buf = chan->buf[cpu]; | 804 | buf = chan->buf[cpu]; |
804 | buf->subbufs_consumed += subbufs_consumed; | 805 | if (subbufs_consumed > buf->subbufs_produced - buf->subbufs_consumed) |
805 | if (buf->subbufs_consumed > buf->subbufs_produced) | ||
806 | buf->subbufs_consumed = buf->subbufs_produced; | 806 | buf->subbufs_consumed = buf->subbufs_produced; |
807 | else | ||
808 | buf->subbufs_consumed += subbufs_consumed; | ||
807 | } | 809 | } |
808 | EXPORT_SYMBOL_GPL(relay_subbufs_consumed); | 810 | EXPORT_SYMBOL_GPL(relay_subbufs_consumed); |
809 | 811 | ||
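The reordered check matters because the sub-buffer counters are unsigned: testing the request against the produced-consumed window before adding prevents both overshoot and wrap-around. A tiny standalone illustration of the clamping arithmetic, with hypothetical values:

#include <stdio.h>

int main(void)
{
	size_t produced = 10, consumed = 7;
	size_t request = 1000;			/* bogus caller value */

	if (request > produced - consumed)	/* more than is available */
		consumed = produced;		/* clamp, as relay.c now does */
	else
		consumed += request;

	printf("consumed=%zu\n", consumed);	/* 10, never 1007 */
	return 0;
}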
diff --git a/kernel/sched.c b/kernel/sched.c index 196d48babbef..73513f4e19df 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -5196,11 +5196,17 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) | |||
5196 | __wake_up_common(q, mode, 1, 0, NULL); | 5196 | __wake_up_common(q, mode, 1, 0, NULL); |
5197 | } | 5197 | } |
5198 | 5198 | ||
5199 | void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) | ||
5200 | { | ||
5201 | __wake_up_common(q, mode, 1, 0, key); | ||
5202 | } | ||
5203 | |||
5199 | /** | 5204 | /** |
5200 | * __wake_up_sync - wake up threads blocked on a waitqueue. | 5205 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. |
5201 | * @q: the waitqueue | 5206 | * @q: the waitqueue |
5202 | * @mode: which threads | 5207 | * @mode: which threads |
5203 | * @nr_exclusive: how many wake-one or wake-many threads to wake up | 5208 | * @nr_exclusive: how many wake-one or wake-many threads to wake up |
5209 | * @key: opaque value to be passed to wakeup targets | ||
5204 | * | 5210 | * |
5205 | * The sync wakeup differs that the waker knows that it will schedule | 5211 | * The sync wakeup differs that the waker knows that it will schedule |
5206 | * away soon, so while the target thread will be woken up, it will not | 5212 | * away soon, so while the target thread will be woken up, it will not |
@@ -5209,8 +5215,8 @@ void __wake_up_locked(wait_queue_head_t *q, unsigned int mode) | |||
5209 | * | 5215 | * |
5210 | * On UP it can prevent extra preemption. | 5216 | * On UP it can prevent extra preemption. |
5211 | */ | 5217 | */ |
5212 | void | 5218 | void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, |
5213 | __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | 5219 | int nr_exclusive, void *key) |
5214 | { | 5220 | { |
5215 | unsigned long flags; | 5221 | unsigned long flags; |
5216 | int sync = 1; | 5222 | int sync = 1; |
@@ -5222,9 +5228,18 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | |||
5222 | sync = 0; | 5228 | sync = 0; |
5223 | 5229 | ||
5224 | spin_lock_irqsave(&q->lock, flags); | 5230 | spin_lock_irqsave(&q->lock, flags); |
5225 | __wake_up_common(q, mode, nr_exclusive, sync, NULL); | 5231 | __wake_up_common(q, mode, nr_exclusive, sync, key); |
5226 | spin_unlock_irqrestore(&q->lock, flags); | 5232 | spin_unlock_irqrestore(&q->lock, flags); |
5227 | } | 5233 | } |
5234 | EXPORT_SYMBOL_GPL(__wake_up_sync_key); | ||
5235 | |||
5236 | /* | ||
5237 | * __wake_up_sync - see __wake_up_sync_key() | ||
5238 | */ | ||
5239 | void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | ||
5240 | { | ||
5241 | __wake_up_sync_key(q, mode, nr_exclusive, NULL); | ||
5242 | } | ||
5228 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ | 5243 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
5229 | 5244 | ||
5230 | /** | 5245 | /** |
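The key parameter lets a wakeup carry an opaque value to each waiter's wake function, so waiters can filter rather than wake unconditionally. A kernel-style sketch of a keyed waiter, assuming the waitqueue API of this era (wait_queue_t, autoremove_wake_function); interpreting the key as a poll event mask is only an example of how a caller might use it.

#include <linux/poll.h>
#include <linux/wait.h>

/* Wake only if the waker's key says readable data arrived. */
static int readable_wake_function(wait_queue_t *wait, unsigned mode,
				  int sync, void *key)
{
	unsigned long events = (unsigned long)key;

	if (key && !(events & POLLIN))
		return 0;	/* not our event, keep sleeping */
	return autoremove_wake_function(wait, mode, sync, key);
}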
diff --git a/kernel/signal.c b/kernel/signal.c index 1c8814481a11..d8034737db4c 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -55,10 +55,22 @@ static int sig_handler_ignored(void __user *handler, int sig) | |||
55 | (handler == SIG_DFL && sig_kernel_ignore(sig)); | 55 | (handler == SIG_DFL && sig_kernel_ignore(sig)); |
56 | } | 56 | } |
57 | 57 | ||
58 | static int sig_ignored(struct task_struct *t, int sig) | 58 | static int sig_task_ignored(struct task_struct *t, int sig, |
59 | int from_ancestor_ns) | ||
59 | { | 60 | { |
60 | void __user *handler; | 61 | void __user *handler; |
61 | 62 | ||
63 | handler = sig_handler(t, sig); | ||
64 | |||
65 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && | ||
66 | handler == SIG_DFL && !from_ancestor_ns) | ||
67 | return 1; | ||
68 | |||
69 | return sig_handler_ignored(handler, sig); | ||
70 | } | ||
71 | |||
72 | static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns) | ||
73 | { | ||
62 | /* | 74 | /* |
63 | * Blocked signals are never ignored, since the | 75 | * Blocked signals are never ignored, since the |
64 | * signal handler may change by the time it is | 76 | * signal handler may change by the time it is |
@@ -67,14 +79,13 @@ static int sig_ignored(struct task_struct *t, int sig) | |||
67 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) | 79 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) |
68 | return 0; | 80 | return 0; |
69 | 81 | ||
70 | handler = sig_handler(t, sig); | 82 | if (!sig_task_ignored(t, sig, from_ancestor_ns)) |
71 | if (!sig_handler_ignored(handler, sig)) | ||
72 | return 0; | 83 | return 0; |
73 | 84 | ||
74 | /* | 85 | /* |
75 | * Tracers may want to know about even ignored signals. | 86 | * Tracers may want to know about even ignored signals. |
76 | */ | 87 | */ |
77 | return !tracehook_consider_ignored_signal(t, sig, handler); | 88 | return !tracehook_consider_ignored_signal(t, sig); |
78 | } | 89 | } |
79 | 90 | ||
80 | /* | 91 | /* |
@@ -318,7 +329,7 @@ int unhandled_signal(struct task_struct *tsk, int sig) | |||
318 | return 1; | 329 | return 1; |
319 | if (handler != SIG_IGN && handler != SIG_DFL) | 330 | if (handler != SIG_IGN && handler != SIG_DFL) |
320 | return 0; | 331 | return 0; |
321 | return !tracehook_consider_fatal_signal(tsk, sig, handler); | 332 | return !tracehook_consider_fatal_signal(tsk, sig); |
322 | } | 333 | } |
323 | 334 | ||
324 | 335 | ||
@@ -624,7 +635,7 @@ static int check_kill_permission(int sig, struct siginfo *info, | |||
624 | * Returns true if the signal should be actually delivered, otherwise | 635 | * Returns true if the signal should be actually delivered, otherwise |
625 | * it should be dropped. | 636 | * it should be dropped. |
626 | */ | 637 | */ |
627 | static int prepare_signal(int sig, struct task_struct *p) | 638 | static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns) |
628 | { | 639 | { |
629 | struct signal_struct *signal = p->signal; | 640 | struct signal_struct *signal = p->signal; |
630 | struct task_struct *t; | 641 | struct task_struct *t; |
@@ -708,7 +719,7 @@ static int prepare_signal(int sig, struct task_struct *p) | |||
708 | } | 719 | } |
709 | } | 720 | } |
710 | 721 | ||
711 | return !sig_ignored(p, sig); | 722 | return !sig_ignored(p, sig, from_ancestor_ns); |
712 | } | 723 | } |
713 | 724 | ||
714 | /* | 725 | /* |
@@ -777,7 +788,7 @@ static void complete_signal(int sig, struct task_struct *p, int group) | |||
777 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && | 788 | !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) && |
778 | !sigismember(&t->real_blocked, sig) && | 789 | !sigismember(&t->real_blocked, sig) && |
779 | (sig == SIGKILL || | 790 | (sig == SIGKILL || |
780 | !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) { | 791 | !tracehook_consider_fatal_signal(t, sig))) { |
781 | /* | 792 | /* |
782 | * This signal will be fatal to the whole group. | 793 | * This signal will be fatal to the whole group. |
783 | */ | 794 | */ |
@@ -813,8 +824,8 @@ static inline int legacy_queue(struct sigpending *signals, int sig) | |||
813 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); | 824 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); |
814 | } | 825 | } |
815 | 826 | ||
816 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | 827 | static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, |
817 | int group) | 828 | int group, int from_ancestor_ns) |
818 | { | 829 | { |
819 | struct sigpending *pending; | 830 | struct sigpending *pending; |
820 | struct sigqueue *q; | 831 | struct sigqueue *q; |
@@ -822,7 +833,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
822 | trace_sched_signal_send(sig, t); | 833 | trace_sched_signal_send(sig, t); |
823 | 834 | ||
824 | assert_spin_locked(&t->sighand->siglock); | 835 | assert_spin_locked(&t->sighand->siglock); |
825 | if (!prepare_signal(sig, t)) | 836 | |
837 | if (!prepare_signal(sig, t, from_ancestor_ns)) | ||
826 | return 0; | 838 | return 0; |
827 | 839 | ||
828 | pending = group ? &t->signal->shared_pending : &t->pending; | 840 | pending = group ? &t->signal->shared_pending : &t->pending; |
@@ -871,6 +883,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | |||
871 | break; | 883 | break; |
872 | default: | 884 | default: |
873 | copy_siginfo(&q->info, info); | 885 | copy_siginfo(&q->info, info); |
886 | if (from_ancestor_ns) | ||
887 | q->info.si_pid = 0; | ||
874 | break; | 888 | break; |
875 | } | 889 | } |
876 | } else if (!is_si_special(info)) { | 890 | } else if (!is_si_special(info)) { |
@@ -889,6 +903,20 @@ out_set: | |||
889 | return 0; | 903 | return 0; |
890 | } | 904 | } |
891 | 905 | ||
906 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, | ||
907 | int group) | ||
908 | { | ||
909 | int from_ancestor_ns = 0; | ||
910 | |||
911 | #ifdef CONFIG_PID_NS | ||
912 | if (!is_si_special(info) && SI_FROMUSER(info) && | ||
913 | task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0) | ||
914 | from_ancestor_ns = 1; | ||
915 | #endif | ||
916 | |||
917 | return __send_signal(sig, info, t, group, from_ancestor_ns); | ||
918 | } | ||
919 | |||
892 | int print_fatal_signals; | 920 | int print_fatal_signals; |
893 | 921 | ||
894 | static void print_fatal_signal(struct pt_regs *regs, int signr) | 922 | static void print_fatal_signal(struct pt_regs *regs, int signr) |
@@ -1133,7 +1161,7 @@ int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid, | |||
1133 | if (sig && p->sighand) { | 1161 | if (sig && p->sighand) { |
1134 | unsigned long flags; | 1162 | unsigned long flags; |
1135 | spin_lock_irqsave(&p->sighand->siglock, flags); | 1163 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1136 | ret = __group_send_sig_info(sig, info, p); | 1164 | ret = __send_signal(sig, info, p, 1, 0); |
1137 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | 1165 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1138 | } | 1166 | } |
1139 | out_unlock: | 1167 | out_unlock: |
@@ -1320,7 +1348,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group) | |||
1320 | goto ret; | 1348 | goto ret; |
1321 | 1349 | ||
1322 | ret = 1; /* the signal is ignored */ | 1350 | ret = 1; /* the signal is ignored */ |
1323 | if (!prepare_signal(sig, t)) | 1351 | if (!prepare_signal(sig, t, 0)) |
1324 | goto out; | 1352 | goto out; |
1325 | 1353 | ||
1326 | ret = 0; | 1354 | ret = 0; |
@@ -1844,9 +1872,16 @@ relock: | |||
1844 | 1872 | ||
1845 | /* | 1873 | /* |
1846 | * Global init gets no signals it doesn't want. | 1874 | * Global init gets no signals it doesn't want. |
1875 | * Container-init gets no signals it doesn't want from same | ||
1876 | * container. | ||
1877 | * | ||
1878 | * Note that if global/container-init sees a sig_kernel_only() | ||
1879 | * signal here, the signal must have been generated internally | ||
1880 | * or must have come from an ancestor namespace. In either | ||
1881 | * case, the signal cannot be dropped. | ||
1847 | */ | 1882 | */ |
1848 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && | 1883 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
1849 | !signal_group_exit(signal)) | 1884 | !sig_kernel_only(signr)) |
1850 | continue; | 1885 | continue; |
1851 | 1886 | ||
1852 | if (sig_kernel_stop(signr)) { | 1887 | if (sig_kernel_stop(signr)) { |
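Taken together, the send-time check in sig_task_ignored() and the delivery-time SIGNAL_UNKILLABLE test above mean a container init ignores even SIGKILL sent from inside its own namespace, while SIGKILL from the ancestor namespace still kills it. A userspace illustration run from the parent namespace; the pid is hypothetical.

#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	pid_t container_init = 12345;	/* hypothetical pid seen from here */

	/*
	 * Sent from the ancestor namespace this is fatal; the same SIGKILL
	 * issued by a task inside the container would be dropped, because
	 * the init task there is SIGNAL_UNKILLABLE with a SIG_DFL handler.
	 */
	if (kill(container_init, SIGKILL) < 0)
		perror("kill");
	return 0;
}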
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 29ab20749dd3..7932653c4ebd 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
@@ -121,7 +121,8 @@ unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock) | |||
121 | local_irq_save(flags); | 121 | local_irq_save(flags); |
122 | preempt_disable(); | 122 | preempt_disable(); |
123 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | 123 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); |
124 | LOCK_CONTENDED(lock, _raw_read_trylock, _raw_read_lock); | 124 | LOCK_CONTENDED_FLAGS(lock, _raw_read_trylock, _raw_read_lock, |
125 | _raw_read_lock_flags, &flags); | ||
125 | return flags; | 126 | return flags; |
126 | } | 127 | } |
127 | EXPORT_SYMBOL(_read_lock_irqsave); | 128 | EXPORT_SYMBOL(_read_lock_irqsave); |
@@ -151,7 +152,8 @@ unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock) | |||
151 | local_irq_save(flags); | 152 | local_irq_save(flags); |
152 | preempt_disable(); | 153 | preempt_disable(); |
153 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 154 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
154 | LOCK_CONTENDED(lock, _raw_write_trylock, _raw_write_lock); | 155 | LOCK_CONTENDED_FLAGS(lock, _raw_write_trylock, _raw_write_lock, |
156 | _raw_write_lock_flags, &flags); | ||
155 | return flags; | 157 | return flags; |
156 | } | 158 | } |
157 | EXPORT_SYMBOL(_write_lock_irqsave); | 159 | EXPORT_SYMBOL(_write_lock_irqsave); |
@@ -299,16 +301,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas | |||
299 | local_irq_save(flags); | 301 | local_irq_save(flags); |
300 | preempt_disable(); | 302 | preempt_disable(); |
301 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | 303 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); |
302 | /* | 304 | LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock, |
303 | * On lockdep we dont want the hand-coded irq-enable of | 305 | _raw_spin_lock_flags, &flags); |
304 | * _raw_spin_lock_flags() code, because lockdep assumes | ||
305 | * that interrupts are not re-enabled during lock-acquire: | ||
306 | */ | ||
307 | #ifdef CONFIG_LOCKDEP | ||
308 | LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock); | ||
309 | #else | ||
310 | _raw_spin_lock_flags(lock, &flags); | ||
311 | #endif | ||
312 | return flags; | 306 | return flags; |
313 | } | 307 | } |
314 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); | 308 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); |
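LOCK_CONTENDED_FLAGS centralizes the lockdep special case that _spin_lock_irqsave_nested() used to open-code. A sketch of the shape such a macro plausibly has (an assumption about <linux/lockdep.h>, not a quote of it):

/* Assumed shape: with lockdep, avoid the irq-reenabling *_lock_flags()
 * variant (lockdep assumes irqs stay off across lock-acquire); without
 * lockdep, use the flags-aware slow path. */
#ifdef CONFIG_LOCKDEP
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))
#else
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))
#endif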
diff --git a/kernel/sys.c b/kernel/sys.c index ce182aaed204..51dbb55604e8 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1014,10 +1014,8 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid) | |||
1014 | if (err) | 1014 | if (err) |
1015 | goto out; | 1015 | goto out; |
1016 | 1016 | ||
1017 | if (task_pgrp(p) != pgrp) { | 1017 | if (task_pgrp(p) != pgrp) |
1018 | change_pid(p, PIDTYPE_PGID, pgrp); | 1018 | change_pid(p, PIDTYPE_PGID, pgrp); |
1019 | set_task_pgrp(p, pid_nr(pgrp)); | ||
1020 | } | ||
1021 | 1019 | ||
1022 | err = 0; | 1020 | err = 0; |
1023 | out: | 1021 | out: |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index c5ef44ff850f..5ec4543dfc06 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -95,12 +95,9 @@ static int sixty = 60; | |||
95 | static int neg_one = -1; | 95 | static int neg_one = -1; |
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | #if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING) | ||
99 | static int two = 2; | ||
100 | #endif | ||
101 | |||
102 | static int zero; | 98 | static int zero; |
103 | static int one = 1; | 99 | static int one = 1; |
100 | static int two = 2; | ||
104 | static unsigned long one_ul = 1; | 101 | static unsigned long one_ul = 1; |
105 | static int one_hundred = 100; | 102 | static int one_hundred = 100; |
106 | 103 | ||
@@ -1010,7 +1007,7 @@ static struct ctl_table vm_table[] = { | |||
1010 | .data = &dirty_expire_interval, | 1007 | .data = &dirty_expire_interval, |
1011 | .maxlen = sizeof(dirty_expire_interval), | 1008 | .maxlen = sizeof(dirty_expire_interval), |
1012 | .mode = 0644, | 1009 | .mode = 0644, |
1013 | .proc_handler = &proc_dointvec_userhz_jiffies, | 1010 | .proc_handler = &proc_dointvec, |
1014 | }, | 1011 | }, |
1015 | { | 1012 | { |
1016 | .ctl_name = VM_NR_PDFLUSH_THREADS, | 1013 | .ctl_name = VM_NR_PDFLUSH_THREADS, |
@@ -1373,10 +1370,7 @@ static struct ctl_table fs_table[] = { | |||
1373 | .data = &lease_break_time, | 1370 | .data = &lease_break_time, |
1374 | .maxlen = sizeof(int), | 1371 | .maxlen = sizeof(int), |
1375 | .mode = 0644, | 1372 | .mode = 0644, |
1376 | .proc_handler = &proc_dointvec_minmax, | 1373 | .proc_handler = &proc_dointvec, |
1377 | .strategy = &sysctl_intvec, | ||
1378 | .extra1 = &zero, | ||
1379 | .extra2 = &two, | ||
1380 | }, | 1374 | }, |
1381 | #endif | 1375 | #endif |
1382 | #ifdef CONFIG_AIO | 1376 | #ifdef CONFIG_AIO |
@@ -1417,7 +1411,10 @@ static struct ctl_table fs_table[] = { | |||
1417 | .data = &suid_dumpable, | 1411 | .data = &suid_dumpable, |
1418 | .maxlen = sizeof(int), | 1412 | .maxlen = sizeof(int), |
1419 | .mode = 0644, | 1413 | .mode = 0644, |
1420 | .proc_handler = &proc_dointvec, | 1414 | .proc_handler = &proc_dointvec_minmax, |
1415 | .strategy = &sysctl_intvec, | ||
1416 | .extra1 = &zero, | ||
1417 | .extra2 = &two, | ||
1421 | }, | 1418 | }, |
1422 | #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) | 1419 | #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) |
1423 | { | 1420 | { |
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c index 3b34b3545936..92359cc747a7 100644 --- a/kernel/utsname_sysctl.c +++ b/kernel/utsname_sysctl.c | |||
@@ -37,7 +37,7 @@ static void put_uts(ctl_table *table, int write, void *which) | |||
37 | up_write(&uts_sem); | 37 | up_write(&uts_sem); |
38 | } | 38 | } |
39 | 39 | ||
40 | #ifdef CONFIG_PROC_FS | 40 | #ifdef CONFIG_PROC_SYSCTL |
41 | /* | 41 | /* |
42 | * Special case of dostring for the UTS structure. This has locks | 42 | * Special case of dostring for the UTS structure. This has locks |
43 | * to observe. Should this be in kernel/sys.c ???? | 43 | * to observe. Should this be in kernel/sys.c ???? |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 9aedd9fd825b..32f8e0d2bf5a 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -48,8 +48,6 @@ struct cpu_workqueue_struct { | |||
48 | 48 | ||
49 | struct workqueue_struct *wq; | 49 | struct workqueue_struct *wq; |
50 | struct task_struct *thread; | 50 | struct task_struct *thread; |
51 | |||
52 | int run_depth; /* Detect run_workqueue() recursion depth */ | ||
53 | } ____cacheline_aligned; | 51 | } ____cacheline_aligned; |
54 | 52 | ||
55 | /* | 53 | /* |
@@ -262,13 +260,6 @@ EXPORT_SYMBOL_GPL(queue_delayed_work_on); | |||
262 | static void run_workqueue(struct cpu_workqueue_struct *cwq) | 260 | static void run_workqueue(struct cpu_workqueue_struct *cwq) |
263 | { | 261 | { |
264 | spin_lock_irq(&cwq->lock); | 262 | spin_lock_irq(&cwq->lock); |
265 | cwq->run_depth++; | ||
266 | if (cwq->run_depth > 3) { | ||
267 | /* morton gets to eat his hat */ | ||
268 | printk("%s: recursion depth exceeded: %d\n", | ||
269 | __func__, cwq->run_depth); | ||
270 | dump_stack(); | ||
271 | } | ||
272 | while (!list_empty(&cwq->worklist)) { | 263 | while (!list_empty(&cwq->worklist)) { |
273 | struct work_struct *work = list_entry(cwq->worklist.next, | 264 | struct work_struct *work = list_entry(cwq->worklist.next, |
274 | struct work_struct, entry); | 265 | struct work_struct, entry); |
@@ -311,7 +302,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
311 | spin_lock_irq(&cwq->lock); | 302 | spin_lock_irq(&cwq->lock); |
312 | cwq->current_work = NULL; | 303 | cwq->current_work = NULL; |
313 | } | 304 | } |
314 | cwq->run_depth--; | ||
315 | spin_unlock_irq(&cwq->lock); | 305 | spin_unlock_irq(&cwq->lock); |
316 | } | 306 | } |
317 | 307 | ||
@@ -368,29 +358,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, | |||
368 | 358 | ||
369 | static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) | 359 | static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) |
370 | { | 360 | { |
371 | int active; | 361 | int active = 0; |
362 | struct wq_barrier barr; | ||
372 | 363 | ||
373 | if (cwq->thread == current) { | 364 | WARN_ON(cwq->thread == current); |
374 | /* | ||
375 | * Probably keventd trying to flush its own queue. So simply run | ||
376 | * it by hand rather than deadlocking. | ||
377 | */ | ||
378 | run_workqueue(cwq); | ||
379 | active = 1; | ||
380 | } else { | ||
381 | struct wq_barrier barr; | ||
382 | 365 | ||
383 | active = 0; | 366 | spin_lock_irq(&cwq->lock); |
384 | spin_lock_irq(&cwq->lock); | 367 | if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { |
385 | if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) { | 368 | insert_wq_barrier(cwq, &barr, &cwq->worklist); |
386 | insert_wq_barrier(cwq, &barr, &cwq->worklist); | 369 | active = 1; |
387 | active = 1; | ||
388 | } | ||
389 | spin_unlock_irq(&cwq->lock); | ||
390 | |||
391 | if (active) | ||
392 | wait_for_completion(&barr.done); | ||
393 | } | 370 | } |
371 | spin_unlock_irq(&cwq->lock); | ||
372 | |||
373 | if (active) | ||
374 | wait_for_completion(&barr.done); | ||
394 | 375 | ||
395 | return active; | 376 | return active; |
396 | } | 377 | } |
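The removed self-flush path papered over a real bug: a work item that flushes its own workqueue can never see the barrier complete, because the only thread able to run the barrier is the one waiting on it. With this change that situation trips the WARN_ON instead of silently running the queue by hand. A kernel-style sketch of the pattern that is now flagged; the module code and names are invented for illustration.

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/*
	 * BAD: this runs on my_wq's worker thread, so waiting for my_wq
	 * to drain can never finish; flush_cpu_workqueue() now hits
	 * WARN_ON(cwq->thread == current) and then deadlocks on the
	 * barrier completion.
	 */
	flush_workqueue(my_wq);
}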