Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/hw_breakpoint.c | 12
-rw-r--r--  kernel/futex.c                | 18
-rw-r--r--  kernel/modsign_pubkey.c       |  4
-rw-r--r--  kernel/module_signing.c       | 14
-rw-r--r--  kernel/sched/auto_group.c     |  4
-rw-r--r--  kernel/sched/auto_group.h     |  5
-rw-r--r--  kernel/watchdog.c             |  7
-rw-r--r--  kernel/workqueue.c            | 22
8 files changed, 54 insertions(+), 32 deletions(-)
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 9a7b487c6fe2..fe8a916507ed 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -111,14 +111,16 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
  * Count the number of breakpoints of the same type and same task.
  * The given event must be not on the list.
  */
-static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
+static int task_bp_pinned(int cpu, struct perf_event *bp, enum bp_type_idx type)
 {
 	struct task_struct *tsk = bp->hw.bp_target;
 	struct perf_event *iter;
 	int count = 0;
 
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
-		if (iter->hw.bp_target == tsk && find_slot_idx(iter) == type)
+		if (iter->hw.bp_target == tsk &&
+		    find_slot_idx(iter) == type &&
+		    cpu == iter->cpu)
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -141,7 +143,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(bp, type);
+			slots->pinned += task_bp_pinned(cpu, bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
@@ -154,7 +156,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(bp, type);
+			nr += task_bp_pinned(cpu, bp, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -188,7 +190,7 @@ static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 	int old_idx = 0;
 	int idx = 0;
 
-	old_count = task_bp_pinned(bp, type);
+	old_count = task_bp_pinned(cpu, bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;
 
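
The change above makes task_bp_pinned() charge a task-bound breakpoint only against the CPU it is tied to, rather than against every CPU's slot count. A hypothetical user-space sketch (not part of the patch; the name watched is made up) of the kind of event involved — a breakpoint bound both to the calling task and to CPU 0 via perf_event_open(2):

/*
 * Hypothetical sketch, not part of the patch: a write-watchpoint bound to
 * the calling task *and* to CPU 0.  The fixed accounting above only
 * charges such an event against CPU 0's breakpoint slots.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int watched;			/* variable to put a write-watchpoint on */

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.bp_type = HW_BREAKPOINT_W;		/* fire on writes */
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len = HW_BREAKPOINT_LEN_4;

	/* pid = 0: this task; cpu = 0: count only while it runs on CPU 0 */
	fd = syscall(__NR_perf_event_open, &attr, 0, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;			/* the access the breakpoint counts */
	close(fd);
	return 0;
}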
diff --git a/kernel/futex.c b/kernel/futex.c
index 20ef219bbe9b..19eb089ca003 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -843,6 +843,9 @@ static void wake_futex(struct futex_q *q)
 {
 	struct task_struct *p = q->task;
 
+	if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
+		return;
+
 	/*
 	 * We set q->lock_ptr = NULL _before_ we wake up the task. If
 	 * a non-futex wake up happens on another CPU then the task
@@ -1078,6 +1081,10 @@ retry_private:
 
 	plist_for_each_entry_safe(this, next, head, list) {
 		if (match_futex (&this->key, &key1)) {
+			if (this->pi_state || this->rt_waiter) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
 			wake_futex(this);
 			if (++ret >= nr_wake)
 				break;
@@ -1090,6 +1097,10 @@ retry_private:
 		op_ret = 0;
 		plist_for_each_entry_safe(this, next, head, list) {
 			if (match_futex (&this->key, &key2)) {
+				if (this->pi_state || this->rt_waiter) {
+					ret = -EINVAL;
+					goto out_unlock;
+				}
 				wake_futex(this);
 				if (++op_ret >= nr_wake2)
 					break;
@@ -1098,6 +1109,7 @@ retry_private:
 		ret += op_ret;
 	}
 
+out_unlock:
 	double_unlock_hb(hb1, hb2);
 out_put_keys:
 	put_futex_key(&key2);
@@ -1387,9 +1399,13 @@ retry_private:
 		/*
 		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
 		 * be paired with each other and no other futex ops.
+		 *
+		 * We should never be requeueing a futex_q with a pi_state,
+		 * which is awaiting a futex_unlock_pi().
 		 */
 		if ((requeue_pi && !this->rt_waiter) ||
-		    (!requeue_pi && this->rt_waiter)) {
+		    (!requeue_pi && this->rt_waiter) ||
+		    this->pi_state) {
 			ret = -EINVAL;
 			break;
 		}
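
The futex changes above make FUTEX_WAKE, FUTEX_WAKE_OP and FUTEX_CMP_REQUEUE refuse waiters that belong to a PI or requeue-PI operation (warning or returning -EINVAL) instead of waking them as if they were ordinary waiters. For contrast, a minimal user-space sketch of the plain, non-PI wake that remains valid (the raw syscall is used as an assumption, since libc provides no futex() wrapper):

/*
 * Minimal user-space sketch.  This is the ordinary, non-PI wake that is
 * still allowed; waiters blocked on a PI futex must be released with
 * FUTEX_UNLOCK_PI instead, which the patched kernel now enforces.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>

static uint32_t futex_word;		/* ordinary, non-PI futex word */

int main(void)
{
	/* wake at most one task blocked in FUTEX_WAIT on &futex_word */
	long woken = syscall(SYS_futex, &futex_word, FUTEX_WAKE, 1,
			     NULL, NULL, 0);

	if (woken < 0) {
		perror("futex");
		return 1;
	}
	printf("woke %ld waiter(s)\n", woken);
	return 0;
}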
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 4646eb2c3820..767e559dfb10 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -21,10 +21,10 @@ struct key *modsign_keyring;
 extern __initdata const u8 modsign_certificate_list[];
 extern __initdata const u8 modsign_certificate_list_end[];
 asm(".section .init.data,\"aw\"\n"
-    "modsign_certificate_list:\n"
+    SYMBOL_PREFIX "modsign_certificate_list:\n"
     ".incbin \"signing_key.x509\"\n"
     ".incbin \"extra_certificates\"\n"
-    "modsign_certificate_list_end:"
+    SYMBOL_PREFIX "modsign_certificate_list_end:"
     );
 
 /*
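
The SYMBOL_PREFIX change matters on architectures whose ABI prefixes C symbol names (typically with an underscore): the compiler mangles the extern declarations, but text inside asm() is emitted verbatim, so the labels must be written with the same prefix. An illustrative stand-alone sketch of the same idea (demo_blob is made up; the empty fallback definition of SYMBOL_PREFIX is an assumption so this builds outside the kernel):

/*
 * On a prefixed ABI the compiler refers to "_demo_blob", so the
 * hand-written asm label must carry the same prefix, exactly as the
 * patch does for modsign_certificate_list.
 */
#ifndef SYMBOL_PREFIX
#define SYMBOL_PREFIX ""	/* "" on most arches, "_" on prefixed ones */
#endif

extern const unsigned char demo_blob[];	/* referenced as SYMBOL_PREFIX "demo_blob" */

asm(".section .rodata,\"a\"\n"
    SYMBOL_PREFIX "demo_blob:\n"	/* label spelled with the same prefix */
    ".byte 0x42\n"
    ".previous");

int main(void)
{
	return demo_blob[0] == 0x42 ? 0 : 1;
}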
diff --git a/kernel/module_signing.c b/kernel/module_signing.c
index ea1b1df5dbb0..f2970bddc5ea 100644
--- a/kernel/module_signing.c
+++ b/kernel/module_signing.c
@@ -27,13 +27,13 @@
  * - Information block
  */
 struct module_signature {
-	enum pkey_algo		algo : 8;	/* Public-key crypto algorithm */
-	enum pkey_hash_algo	hash : 8;	/* Digest algorithm */
-	enum pkey_id_type	id_type : 8;	/* Key identifier type */
+	u8	algo;		/* Public-key crypto algorithm [enum pkey_algo] */
+	u8	hash;		/* Digest algorithm [enum pkey_hash_algo] */
+	u8	id_type;	/* Key identifier type [enum pkey_id_type] */
 	u8	signer_len;	/* Length of signer's name */
 	u8	key_id_len;	/* Length of key identifier */
 	u8	__pad[3];
 	__be32	sig_len;	/* Length of signature data */
 };
 
 /*
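
Replacing the enum bitfields with u8 pins down the trailer layout: how a compiler packs enum bitfields is implementation-defined, while plain one-byte fields give the same 12-byte structure everywhere. A stand-alone sketch (not kernel code, requires C11) that checks this layout with static assertions:

/*
 * The same trailer expressed with fixed-width types.  With one-byte
 * fields instead of enum bitfields the size and offsets no longer
 * depend on the compiler.
 */
#include <stdint.h>
#include <stddef.h>

struct module_signature_layout {
	uint8_t  algo;		/* public-key crypto algorithm */
	uint8_t  hash;		/* digest algorithm */
	uint8_t  id_type;	/* key identifier type */
	uint8_t  signer_len;	/* length of signer's name */
	uint8_t  key_id_len;	/* length of key identifier */
	uint8_t  pad[3];
	uint32_t sig_len;	/* length of signature data, stored big-endian */
};

/* five 1-byte fields + 3 bytes of padding + a 4-byte length = 12 bytes */
_Static_assert(sizeof(struct module_signature_layout) == 12,
	       "signature trailer must be exactly 12 bytes");
_Static_assert(offsetof(struct module_signature_layout, sig_len) == 8,
	       "sig_len must sit at byte offset 8");

int main(void)
{
	return 0;
}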
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
index 0984a21076a3..15f60d01198b 100644
--- a/kernel/sched/auto_group.c
+++ b/kernel/sched/auto_group.c
@@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
 	p->signal->autogroup = autogroup_kref_get(ag);
 
-	if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
-		goto out;
-
 	t = p;
 	do {
 		sched_move_task(t);
 	} while_each_thread(p, t);
 
-out:
 	unlock_task_sighand(p, &flags);
 	autogroup_kref_put(prev);
 }
diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
index 8bd047142816..443232ebbb53 100644
--- a/kernel/sched/auto_group.h
+++ b/kernel/sched/auto_group.h
@@ -4,11 +4,6 @@
 #include <linux/rwsem.h>
 
 struct autogroup {
-	/*
-	 * reference doesn't mean how many thread attach to this
-	 * autogroup now. It just stands for the number of task
-	 * could use this autogroup.
-	 */
 	struct kref		kref;
 	struct task_group	*tg;
 	struct rw_semaphore	lock;
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9d4c8d5a1f53..c8c21be11ab4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -116,7 +116,7 @@ static unsigned long get_timestamp(int this_cpu)
 	return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
 }
 
-static unsigned long get_sample_period(void)
+static u64 get_sample_period(void)
 {
 	/*
 	 * convert watchdog_thresh from seconds to ns
@@ -125,7 +125,7 @@ static unsigned long get_sample_period(void)
 	 * and hard thresholds) to increment before the
 	 * hardlockup detector generates a warning
 	 */
-	return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
+	return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
 }
 
 /* Commands for resetting the watchdog */
@@ -368,6 +368,9 @@ static void watchdog_disable(unsigned int cpu)
 {
 	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
+	if (!watchdog_enabled)
+		return;
+
 	watchdog_set_prio(SCHED_NORMAL, 0);
 	hrtimer_cancel(hrtimer);
 	/* disable the perf event */
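
Returning u64 and casting NSEC_PER_SEC avoids a 32-bit overflow: on 32-bit builds unsigned long is 32 bits, so threshold * (NSEC_PER_SEC / 5) can exceed 2^32 and wrap. A small stand-alone illustration of the arithmetic (the 60-second threshold is just an example value, not taken from the patch):

/*
 * Stand-alone illustration (not kernel code) of the overflow the u64
 * change avoids.
 */
#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	unsigned int thresh = 60;			/* seconds */

	/* old math: done in unsigned long, which is 32 bits on 32-bit builds */
	unsigned long narrow = thresh * (NSEC_PER_SEC / 5);

	/* patched math: widen to 64 bits before multiplying */
	unsigned long long wide = thresh * ((unsigned long long)NSEC_PER_SEC / 5);

	/* on a 32-bit build, narrow wraps to 3410065408 instead of 12000000000 */
	printf("narrow = %lu\nwide   = %llu\n", narrow, wide);
	return 0;
}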
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 042d221d33cc..1dae900df798 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1361,8 +1361,19 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
 
 	WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
 		     timer->data != (unsigned long)dwork);
-	BUG_ON(timer_pending(timer));
-	BUG_ON(!list_empty(&work->entry));
+	WARN_ON_ONCE(timer_pending(timer));
+	WARN_ON_ONCE(!list_empty(&work->entry));
+
+	/*
+	 * If @delay is 0, queue @dwork->work immediately.  This is for
+	 * both optimization and correctness.  The earliest @timer can
+	 * expire is on the closest next tick and delayed_work users depend
+	 * on that there's no such delay when @delay is 0.
+	 */
+	if (!delay) {
+		__queue_work(cpu, wq, &dwork->work);
+		return;
+	}
 
 	timer_stats_timer_set_start_info(&dwork->timer);
 
@@ -1417,9 +1428,6 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	bool ret = false;
 	unsigned long flags;
 
-	if (!delay)
-		return queue_work_on(cpu, wq, &dwork->work);
-
 	/* read the comment in __queue_work() */
 	local_irq_save(flags);
 
@@ -2407,8 +2415,10 @@ static int rescuer_thread(void *__wq)
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	if (kthread_should_stop())
+	if (kthread_should_stop()) {
+		__set_current_state(TASK_RUNNING);
 		return 0;
+	}
 
 	/*
 	 * See whether any cpu is asking for help.  Unbounded
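
With the __queue_delayed_work() change, a delay of 0 is short-circuited inside the common helper rather than only in queue_delayed_work_on(), so other callers of that helper, such as mod_delayed_work(), appear to get the same no-extra-tick behaviour. A hypothetical module sketch of the caller-visible effect (the demo_* names are made up):

/*
 * Hypothetical module sketch: with a delay of 0 the work is handed
 * straight to the workqueue, no timer is armed first.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_fn(struct work_struct *work)
{
	pr_info("demo: delayed work ran\n");
}

static DECLARE_DELAYED_WORK(demo_dwork, demo_fn);

static int __init demo_init(void)
{
	/* delay == 0: queued for immediate execution on the system workqueue */
	queue_delayed_work(system_wq, &demo_dwork, 0);

	/* delay == 0 here as well: the rescheduled work also runs right away */
	mod_delayed_work(system_wq, &demo_dwork, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_dwork);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");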