author    Ingo Molnar <mingo@kernel.org>  2012-12-08 09:25:06 -0500
committer Ingo Molnar <mingo@kernel.org>  2012-12-08 09:25:06 -0500
commit    f0b9abfb044649bc452fb2fb975ff2fd599cc6a3 (patch)
tree      7800081c5cb16a4dfee1e57a70f3be90f7b50d9a /kernel
parent    adc1ef1e37358d3c17d1a74a58b2e104fc0bda15 (diff)
parent    1b3c393cd43f22ead8a6a2f839efc6df8ebd7465 (diff)
Merge branch 'linus' into perf/core
Conflicts:
	tools/perf/Makefile
	tools/perf/builtin-test.c
	tools/perf/perf.h
	tools/perf/tests/parse-events.c
	tools/perf/util/evsel.h

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                |  6
-rw-r--r--  kernel/cgroup.c                | 41
-rw-r--r--  kernel/events/hw_breakpoint.c  | 12
-rw-r--r--  kernel/futex.c                 | 59
-rw-r--r--  kernel/modsign_pubkey.c        |  4
-rw-r--r--  kernel/module.c                | 27
-rw-r--r--  kernel/module_signing.c        | 14
-rw-r--r--  kernel/pid_namespace.c         | 12
-rw-r--r--  kernel/sched/auto_group.c      |  4
-rw-r--r--  kernel/sched/auto_group.h      |  5
-rw-r--r--  kernel/watchdog.c              |  7
-rw-r--r--  kernel/workqueue.c             | 24
12 files changed, 116 insertions(+), 99 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 0dfeca4324ee..86e3285ae7e5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -174,10 +174,8 @@ signing_key.priv signing_key.x509: x509.genkey
 	@echo "###"
 	@echo "### If this takes a long time, you might wish to run rngd in the"
 	@echo "### background to keep the supply of entropy topped up. It"
-	@echo "### needs to be run as root, and should use a hardware random"
-	@echo "### number generator if one is available, eg:"
-	@echo "###"
-	@echo "###     rngd -r /dev/hwrandom"
+	@echo "### needs to be run as root, and uses a hardware random"
+	@echo "### number generator if one is available."
 	@echo "###"
 	openssl req -new -nodes -utf8 $(sign_key_with_hash) -days 36500 -batch \
 		-x509 -config x509.genkey \
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 13774b3b39aa..f24f724620dd 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1962,9 +1962,8 @@ static void cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp,
 	 * trading it for newcg is protected by cgroup_mutex, we're safe to drop
 	 * it here; it will be freed under RCU.
 	 */
-	put_css_set(oldcg);
-
 	set_bit(CGRP_RELEASABLE, &oldcgrp->flags);
+	put_css_set(oldcg);
 }
 
 /**
@@ -4815,31 +4814,20 @@ static const struct file_operations proc_cgroupstats_operations = {
  *
  * A pointer to the shared css_set was automatically copied in
  * fork.c by dup_task_struct(). However, we ignore that copy, since
- * it was not made under the protection of RCU, cgroup_mutex or
- * threadgroup_change_begin(), so it might no longer be a valid
- * cgroup pointer. cgroup_attach_task() might have already changed
- * current->cgroups, allowing the previously referenced cgroup
- * group to be removed and freed.
- *
- * Outside the pointer validity we also need to process the css_set
- * inheritance between threadgoup_change_begin() and
- * threadgoup_change_end(), this way there is no leak in any process
- * wide migration performed by cgroup_attach_proc() that could otherwise
- * miss a thread because it is too early or too late in the fork stage.
+ * it was not made under the protection of RCU or cgroup_mutex, so
+ * might no longer be a valid cgroup pointer. cgroup_attach_task() might
+ * have already changed current->cgroups, allowing the previously
+ * referenced cgroup group to be removed and freed.
  *
  * At the point that cgroup_fork() is called, 'current' is the parent
  * task, and the passed argument 'child' points to the child task.
  */
 void cgroup_fork(struct task_struct *child)
 {
-	/*
-	 * We don't need to task_lock() current because current->cgroups
-	 * can't be changed concurrently here. The parent obviously hasn't
-	 * exited and called cgroup_exit(), and we are synchronized against
-	 * cgroup migration through threadgroup_change_begin().
-	 */
+	task_lock(current);
 	child->cgroups = current->cgroups;
 	get_css_set(child->cgroups);
+	task_unlock(current);
 	INIT_LIST_HEAD(&child->cg_list);
 }
 
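
Both cgroup.c hunks restore the same discipline: the reader of current->cgroups must copy the pointer and take its reference under task_lock(), and the updater must mark the old cgroup releasable before dropping the last reference. A minimal user-space sketch of that pattern, with illustrative names rather than the real kernel API:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    struct css_like {
        atomic_int refcount;
    };

    static struct css_like *current_cgroups;  /* current->cgroups analogue */
    static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;

    /* cgroup_fork() analogue: lock, copy the pointer, take a reference,
     * unlock; never copy without the lock. */
    static struct css_like *fork_copy(void)
    {
        struct css_like *copy;

        pthread_mutex_lock(&task_lock);
        copy = current_cgroups;
        atomic_fetch_add(&copy->refcount, 1);      /* get_css_set() */
        pthread_mutex_unlock(&task_lock);
        return copy;
    }

    /* cgroup_task_migrate() analogue: swap under the lock, and drop the
     * old reference only afterwards, mirroring the reordered put_css_set(). */
    static void migrate_to(struct css_like *newcg)
    {
        struct css_like *oldcg;

        pthread_mutex_lock(&task_lock);
        oldcg = current_cgroups;
        current_cgroups = newcg;
        pthread_mutex_unlock(&task_lock);

        if (atomic_fetch_sub(&oldcg->refcount, 1) == 1)  /* put_css_set() */
            free(oldcg);
    }

    int main(void)
    {
        struct css_like *a = calloc(1, sizeof(*a));
        struct css_like *b = calloc(1, sizeof(*b));

        atomic_store(&a->refcount, 1);
        atomic_store(&b->refcount, 1);
        current_cgroups = a;

        struct css_like *child = fork_copy();  /* a: refcount 2 */
        migrate_to(b);                         /* a: refcount 1, still live */
        (void)child;
        return 0;
    }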
@@ -4895,19 +4883,10 @@ void cgroup_post_fork(struct task_struct *child)
4895 */ 4883 */
4896 if (use_task_css_set_links) { 4884 if (use_task_css_set_links) {
4897 write_lock(&css_set_lock); 4885 write_lock(&css_set_lock);
4898 if (list_empty(&child->cg_list)) { 4886 task_lock(child);
4899 /* 4887 if (list_empty(&child->cg_list))
4900 * It's safe to use child->cgroups without task_lock()
4901 * here because we are protected through
4902 * threadgroup_change_begin() against concurrent
4903 * css_set change in cgroup_task_migrate(). Also
4904 * the task can't exit at that point until
4905 * wake_up_new_task() is called, so we are protected
4906 * against cgroup_exit() setting child->cgroup to
4907 * init_css_set.
4908 */
4909 list_add(&child->cg_list, &child->cgroups->tasks); 4888 list_add(&child->cg_list, &child->cgroups->tasks);
4910 } 4889 task_unlock(child);
4911 write_unlock(&css_set_lock); 4890 write_unlock(&css_set_lock);
4912 } 4891 }
4913} 4892}
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 9a7b487c6fe2..fe8a916507ed 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -111,14 +111,16 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  * Count the number of breakpoints of the same type and same task.
  * The given event must be not on the list.
  */
-static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
+static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
 	struct task_struct *tsk = bp->hw.bp_target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
+		if (iter->hw.bp_target == tsk &&
+		    find_slot_idx(iter) == type &&
+		    cpu == iter->cpu)
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -141,7 +143,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(bp, type);
+			slots->pinned += task_bp_pinned(cpu, bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
@@ -154,7 +156,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(bp, type);
+			nr += task_bp_pinned(cpu, bp, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -188,7 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 	int old_idx = 0;
 	int idx = 0;
 
-	old_count = task_bp_pinned(bp, type);
+	old_count = task_bp_pinned(cpu, bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;
 
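
The extra cpu argument matters for breakpoints bound to both a task and a single CPU: such an event should consume a pinned slot only on its own CPU, which is what the cpu == iter->cpu test encodes. A hedged user-space sketch of creating exactly that kind of event with perf_event_open(2); the helper name and watched variable are illustrative:

    #define _GNU_SOURCE
    #include <linux/hw_breakpoint.h>
    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static int watched;  /* data address to watch */

    /* Open a write-watchpoint on 'watched', scoped to one task AND one CPU
     * (pid >= 0, cpu >= 0): the combination task_bp_pinned() now counts
     * per CPU instead of against every CPU. */
    static int open_task_cpu_bp(pid_t pid, int cpu)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_BREAKPOINT;
        attr.size = sizeof(attr);
        attr.bp_type = HW_BREAKPOINT_W;        /* fire on writes */
        attr.bp_addr = (unsigned long)&watched;
        attr.bp_len = HW_BREAKPOINT_LEN_4;

        return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
    }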
diff --git a/kernel/futex.c b/kernel/futex.c
index 3717e7b306e0..19eb089ca003 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
 				 struct futex_pi_state **ps,
 				 struct task_struct *task, int set_waiters)
 {
-	int lock_taken, ret, ownerdied = 0;
+	int lock_taken, ret, force_take = 0;
 	u32 uval, newval, curval, vpid = task_pid_vnr(task);
 
 retry:
@@ -755,17 +755,15 @@ retry:
 	newval = curval | FUTEX_WAITERS;
 
 	/*
-	 * There are two cases, where a futex might have no owner (the
-	 * owner TID is 0): OWNER_DIED. We take over the futex in this
-	 * case. We also do an unconditional take over, when the owner
-	 * of the futex died.
-	 *
-	 * This is safe as we are protected by the hash bucket lock !
+	 * Should we force take the futex? See below.
 	 */
-	if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
-		/* Keep the OWNER_DIED bit */
+	if (unlikely(force_take)) {
+		/*
+		 * Keep the OWNER_DIED and the WAITERS bit and set the
+		 * new TID value.
+		 */
 		newval = (curval & ~FUTEX_TID_MASK) | vpid;
-		ownerdied = 0;
+		force_take = 0;
 		lock_taken = 1;
 	}
 
@@ -775,7 +773,7 @@ retry:
 		goto retry;
 
 	/*
-	 * We took the lock due to owner died take over.
+	 * We took the lock due to forced take over.
 	 */
 	if (unlikely(lock_taken))
 		return 1;
@@ -790,20 +788,25 @@ retry:
 	switch (ret) {
 	case -ESRCH:
 		/*
-		 * No owner found for this futex. Check if the
-		 * OWNER_DIED bit is set to figure out whether
-		 * this is a robust futex or not.
+		 * We failed to find an owner for this
+		 * futex. So we have no pi_state to block
+		 * on. This can happen in two cases:
+		 *
+		 * 1) The owner died
+		 * 2) A stale FUTEX_WAITERS bit
+		 *
+		 * Re-read the futex value.
 		 */
 		if (get_futex_value_locked(&curval, uaddr))
 			return -EFAULT;
 
 		/*
-		 * We simply start over in case of a robust
-		 * futex. The code above will take the futex
-		 * and return happy.
+		 * If the owner died or we have a stale
+		 * WAITERS bit the owner TID in the user space
+		 * futex is 0.
 		 */
-		if (curval & FUTEX_OWNER_DIED) {
-			ownerdied = 1;
+		if (!(curval & FUTEX_TID_MASK)) {
+			force_take = 1;
 			goto retry;
 		}
 	default:
@@ -840,6 +843,9 @@ static void wake_futex(struct futex_q *q)
 {
 	struct task_struct *p = q->task;
 
+	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
+		return;
+
 	/*
 	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
 	 * a non-futex wake up happens on another CPU then the task
@@ -1075,6 +1081,10 @@ retry_private:
 
 	plist_for_each_entry_safe(this, next, head, list) {
 		if (match_futex (&this->key, &key1)) {
+			if (this->pi_state || this->rt_waiter) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
 			wake_futex(this);
 			if (++ret >= nr_wake)
 				break;
@@ -1087,6 +1097,10 @@ retry_private:
 		op_ret = 0;
 		plist_for_each_entry_safe(this, next, head, list) {
 			if (match_futex (&this->key, &key2)) {
+				if (this->pi_state || this->rt_waiter) {
+					ret = -EINVAL;
+					goto out_unlock;
+				}
 				wake_futex(this);
 				if (++op_ret >= nr_wake2)
 					break;
@@ -1095,6 +1109,7 @@ retry_private:
 		ret += op_ret;
 	}
 
+out_unlock:
 	double_unlock_hb(hb1, hb2);
 out_put_keys:
 	put_futex_key(&key2);
@@ -1384,9 +1399,13 @@ retry_private:
 		/*
 		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
 		 * be paired with each other and no other futex ops.
+		 *
+		 * We should never be requeueing a futex_q with a pi_state,
+		 * which is awaiting a futex_unlock_pi().
 		 */
 		if ((requeue_pi && !this->rt_waiter) ||
-		    (!requeue_pi && this->rt_waiter)) {
+		    (!requeue_pi && this->rt_waiter) ||
+		    this->pi_state) {
 			ret = -EINVAL;
 			break;
 		}
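
All of these futex.c hunks read or guard the same 32-bit futex word: the low 30 bits hold the owner TID and the top two bits are the FUTEX_WAITERS and FUTEX_OWNER_DIED flags, so a zero TID field means "no live owner", whether the owner died or only a stale waiters bit remains. A small decoding sketch using the masks from <linux/futex.h>, mirroring the TID check made before a forced take-over:

    #include <linux/futex.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* True when the word has no live owner: the condition under which
     * futex_lock_pi_atomic() now sets force_take and retries. */
    static bool futex_has_no_owner(uint32_t uval)
    {
        return (uval & FUTEX_TID_MASK) == 0;
    }

    static bool futex_owner_died(uint32_t uval)
    {
        return (uval & FUTEX_OWNER_DIED) != 0;
    }

    static bool futex_has_waiters(uint32_t uval)
    {
        return (uval & FUTEX_WAITERS) != 0;
    }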
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 4646eb2c3820..767e559dfb10 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -21,10 +21,10 @@ struct key *modsign_keyring;
 extern __initdata const u8 modsign_certificate_list[];
 extern __initdata const u8 modsign_certificate_list_end[];
 asm(".section .init.data,\"aw\"\n"
-    "modsign_certificate_list:\n"
+    SYMBOL_PREFIX "modsign_certificate_list:\n"
     ".incbin \"signing_key.x509\"\n"
     ".incbin \"extra_certificates\"\n"
-    "modsign_certificate_list_end:"
+    SYMBOL_PREFIX "modsign_certificate_list_end:"
     );
 
 /*
diff --git a/kernel/module.c b/kernel/module.c
index 6085f5ef88ea..6e48c3a43599 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2293,12 +2293,17 @@ static void layout_symtab(struct module *mod, struct load_info *info)
 	src = (void *)info->hdr + symsect->sh_offset;
 	nsrc = symsect->sh_size / sizeof(*src);
 
+	/* strtab always starts with a nul, so offset 0 is the empty string. */
+	strtab_size = 1;
+
 	/* Compute total space required for the core symbols' strtab. */
-	for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src)
-		if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
-			strtab_size += strlen(&info->strtab[src->st_name]) + 1;
+	for (ndst = i = 0; i < nsrc; i++) {
+		if (i == 0 ||
+		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
 			ndst++;
 		}
+	}
 
 	/* Append room for core symbols at end of core part. */
 	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
@@ -2332,15 +2337,15 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
 	mod->core_symtab = dst = mod->module_core + info->symoffs;
 	mod->core_strtab = s = mod->module_core + info->stroffs;
 	src = mod->symtab;
-	*dst = *src;
 	*s++ = 0;
-	for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
-		if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
-			continue;
-
-		dst[ndst] = *src;
-		dst[ndst++].st_name = s - mod->core_strtab;
-		s += strlcpy(s, &mod->strtab[src->st_name], KSYM_NAME_LEN) + 1;
+	for (ndst = i = 0; i < mod->num_symtab; i++) {
+		if (i == 0 ||
+		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
+			dst[ndst] = src[i];
+			dst[ndst++].st_name = s - mod->core_strtab;
+			s += strlcpy(s, &mod->strtab[src[i].st_name],
+				     KSYM_NAME_LEN) + 1;
+		}
 	}
 	mod->core_num_syms = ndst;
 }
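
Both module.c hunks lean on two ELF conventions: a string table begins with a NUL byte, so st_name == 0 always names the empty string, and symbol-table entry 0 is the reserved null symbol, which is why each loop now keeps index 0 unconditionally via the i == 0 test. A toy string-table builder showing that layout (illustrative, not kernel code):

    #include <stdio.h>
    #include <string.h>

    static char strtab[256];        /* byte 0 is the mandatory NUL */
    static size_t strtab_size = 1;  /* matches the kernel's starting value */

    /* Append a name and return its st_name offset. */
    static size_t strtab_add(const char *name)
    {
        size_t off = strtab_size;

        strcpy(strtab + off, name);
        strtab_size += strlen(name) + 1;
        return off;
    }

    int main(void)
    {
        printf("\"\"            -> offset 0 (empty string)\n");
        printf("init_module    -> offset %zu\n", strtab_add("init_module"));
        printf("cleanup_module -> offset %zu\n", strtab_add("cleanup_module"));
        return 0;
    }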
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index ea1b1df5dbb0..f2970bddc5ea 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -27,13 +27,13 @@
  *	- Information block
  */
 struct module_signature {
-	enum pkey_algo		algo : 8;	/* Public-key crypto algorithm */
-	enum pkey_hash_algo	hash : 8;	/* Digest algorithm */
-	enum pkey_id_type	id_type : 8;	/* Key identifier type */
+	u8	algo;		/* Public-key crypto algorithm [enum pkey_algo] */
+	u8	hash;		/* Digest algorithm [enum pkey_hash_algo] */
+	u8	id_type;	/* Key identifier type [enum pkey_id_type] */
 	u8	signer_len;	/* Length of signer's name */
 	u8	key_id_len;	/* Length of key identifier */
 	u8	__pad[3];
 	__be32	sig_len;	/* Length of signature data */
 };
 
 /*
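
The switch from 8-bit enum bitfields to plain u8 pins down the layout: how a compiler packs and orders enum bitfields is implementation-defined, which is unacceptable for a signature block parsed off disk, whereas u8 fields give a fixed 12-byte header. A user-space mirror asserting the intended layout (the mirror struct itself is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    struct module_signature_mirror {
        uint8_t  algo;        /* [enum pkey_algo] */
        uint8_t  hash;        /* [enum pkey_hash_algo] */
        uint8_t  id_type;     /* [enum pkey_id_type] */
        uint8_t  signer_len;
        uint8_t  key_id_len;
        uint8_t  __pad[3];
        uint32_t sig_len;     /* big-endian on disk (__be32) */
    };

    _Static_assert(sizeof(struct module_signature_mirror) == 12,
                   "signature info block must stay 12 bytes");
    _Static_assert(offsetof(struct module_signature_mirror, sig_len) == 8,
                   "sig_len must follow the five byte fields and the pad");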
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index eb00be205811..7b07cc0dfb75 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -71,12 +71,22 @@ err_alloc:
 	return NULL;
 }
 
+/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
+#define MAX_PID_NS_LEVEL 32
+
 static struct pid_namespace *create_pid_namespace(struct pid_namespace *parent_pid_ns)
 {
 	struct pid_namespace *ns;
 	unsigned int level = parent_pid_ns->level + 1;
-	int i, err = -ENOMEM;
+	int i;
+	int err;
+
+	if (level > MAX_PID_NS_LEVEL) {
+		err = -EINVAL;
+		goto out;
+	}
 
+	err = -ENOMEM;
 	ns = kmem_cache_zalloc(pid_ns_cachep, GFP_KERNEL);
 	if (ns == NULL)
 		goto out;
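
The cap exists because 'struct pid' ends in a flexible array with one entry per namespace level, so its allocation size grows linearly with nesting depth; without a bound, deeply nested namespaces could demand arbitrarily large allocations. Simplified shapes showing the size calculation (not the full kernel definitions):

    #include <stddef.h>

    struct upid_sketch {
        int   nr;   /* pid number as seen in one namespace */
        void *ns;   /* the namespace it is seen in */
    };

    struct pid_sketch {
        unsigned int       level;
        struct upid_sketch numbers[1];  /* really level + 1 entries */
    };

    /* Allocation size for a pid visible at nesting depth 'level'; with
     * MAX_PID_NS_LEVEL at 32 this stays small and bounded. */
    static size_t pid_alloc_size(unsigned int level)
    {
        return sizeof(struct pid_sketch) +
               level * sizeof(struct upid_sketch);
    }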
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 0984a21076a3..15f60d01198b 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
 	p->signal->autogroup = autogroup_kref_get(ag);
 
-	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
 	t = p;
 	do {
 		sched_move_task(t);
 	} while_each_thread(p, t);
 
-out:
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
index 8bd047142816..443232ebbb53 100644
--- a/kernel/sched/auto_group.h
+++ b/kernel/sched/auto_group.h
@@ -4,11 +4,6 @@
 #include <linux/rwsem.h>
 
 struct autogroup {
-	/*
-	 * reference doesn't mean how many thread attach to this
-	 * autogroup now. It just stands for the number of task
-	 * could use this autogroup.
-	 */
 	struct kref		kref;
 	struct task_group	*tg;
 	struct rw_semaphore	lock;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9d4c8d5a1f53..c8c21be11ab4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,7 +116,7 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-static unsigned long get_sample_period(void)
+static u64 get_sample_period(void)
 {
 	/*
 	 * convert watchdog_thresh from seconds to ns
@@ -125,7 +125,7 @@ static unsigned long get_sample_period(void)
 	 * and hard thresholds) to increment before the
 	 * hardlockup detector generates a warning
 	 */
-	return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
+	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
 }
 
 /* Commands for resetting the watchdog */
@@ -368,6 +368,9 @@ static void watchdog_disable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
+	if (!watchdog_enabled)
+		return;
+
 	watchdog_set_prio(SCHED_NORMAL, 0);
 	hrtimer_cancel(hrtimer);
 	/* disable the perf event */
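
The (u64) cast guards real arithmetic: get_softlockup_thresh() is watchdog_thresh * 2, and watchdog_thresh is capped at 60 seconds, so the product can reach 120 * 200000000 = 24e9 ns; even the default threshold of 10s yields 4e9 ns, right at the edge of a 32-bit unsigned long. A user-space demonstration of the wrap, assuming those sysctl bounds:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t softlockup_thresh = 2 * 60;  /* max watchdog_thresh */
        uint32_t chunk = 1000000000u / 5;     /* NSEC_PER_SEC / 5 */

        uint32_t wrapped = softlockup_thresh * chunk;  /* wraps mod 2^32 */
        uint64_t correct = (uint64_t)softlockup_thresh * chunk;

        printf("32-bit result: %" PRIu32 " ns\n", wrapped);
        printf("64-bit result: %" PRIu64 " ns\n", correct);
        return 0;
    }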
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index d951daa0ca9a..1dae900df798 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1361,8 +1361,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
-	BUG_ON(timer_pending(timer));
-	BUG_ON(!list_empty(&work->entry));
+	WARN_ON_ONCE(timer_pending(timer));
+	WARN_ON_ONCE(!list_empty(&work->entry));
+
+	/*
+	 * If @delay is 0, queue @dwork->work immediately.  This is for
+	 * both optimization and correctness.  The earliest @timer can
+	 * expire is on the closest next tick and delayed_work users depend
+	 * on that there's no such delay when @delay is 0.
+	 */
+	if (!delay) {
+		__queue_work(cpu, wq, &dwork->work);
+		return;
+	}
 
 	timer_stats_timer_set_start_info(&dwork->timer);
 
@@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	bool ret = false;
 	unsigned long flags;
 
-	if (!delay)
-		return queue_work_on(cpu, wq, &dwork->work);
-
 	/* read the comment in __queue_work() */
 	local_irq_save(flags);
 
@@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq)
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	if (kthread_should_stop())
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
 		return 0;
+	}
 
 	/*
 	 * See whether any cpu is asking for help.  Unbounded
@@ -2982,7 +2992,7 @@ bool cancel_delayed_work(struct delayed_work *dwork)
 
 	set_work_cpu_and_clear_pending(&dwork->work, work_cpu(&dwork->work));
 	local_irq_restore(flags);
-	return true;
+	return ret;
 }
 EXPORT_SYMBOL(cancel_delayed_work);
 
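
With the zero-delay case handled inside __queue_delayed_work() itself, every caller, not just queue_delayed_work_on(), gets immediate queueing when delay is 0, and cancel_delayed_work() now reports honestly whether it cancelled anything. A minimal module sketch exercising the zero-delay path (illustrative, not part of the patch):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static void demo_fn(struct work_struct *work)
    {
        pr_info("delayed work ran\n");
    }

    static DECLARE_DELAYED_WORK(demo_work, demo_fn);

    static int __init demo_init(void)
    {
        /* delay == 0 now takes the immediate __queue_work() path inside
         * __queue_delayed_work() instead of arming a timer for the next
         * tick. */
        schedule_delayed_work(&demo_work, 0);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        cancel_delayed_work_sync(&demo_work);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");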