about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2016-04-27 15:43:10 -0400
committerDavid S. Miller <davem@davemloft.net>2016-04-27 15:43:10 -0400
commitc0cc53162a0644dd57dce5e2fbb9bbafdc57d183 (patch)
tree02393c85628c6ec7d0d942e880623b7f37cf3460 /kernel
parent8c14586fc320acfed8a0048eb21d1f2e2856fc36 (diff)
parentf28f20da704d399fb1e4d8838ffd697a357d9cc8 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Minor overlapping changes in the conflicts. In the macsec case, the change of the default ID macro name overlapped with the 64-bit netlink attribute alignment fixes in net-next. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/bpf/verifier.c1
-rw-r--r--kernel/cpu.c33
-rw-r--r--kernel/futex.c27
-rw-r--r--kernel/irq/ipi.c1
-rw-r--r--kernel/locking/qspinlock_stat.h8
5 files changed, 55 insertions, 15 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6345623d6b02..56f18068b52b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2092,7 +2092,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
2092 if (IS_ERR(map)) { 2092 if (IS_ERR(map)) {
2093 verbose("fd %d is not pointing to valid bpf_map\n", 2093 verbose("fd %d is not pointing to valid bpf_map\n",
2094 insn->imm); 2094 insn->imm);
2095 fdput(f);
2096 return PTR_ERR(map); 2095 return PTR_ERR(map);
2097 } 2096 }
2098 2097
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ea42e8da861..3e3f6e49eabb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -36,6 +36,7 @@
36 * @target: The target state 36 * @target: The target state
37 * @thread: Pointer to the hotplug thread 37 * @thread: Pointer to the hotplug thread
38 * @should_run: Thread should execute 38 * @should_run: Thread should execute
39 * @rollback: Perform a rollback
39 * @cb_stat: The state for a single callback (install/uninstall) 40 * @cb_stat: The state for a single callback (install/uninstall)
40 * @cb: Single callback function (install/uninstall) 41 * @cb: Single callback function (install/uninstall)
41 * @result: Result of the operation 42 * @result: Result of the operation
@@ -47,6 +48,7 @@ struct cpuhp_cpu_state {
47#ifdef CONFIG_SMP 48#ifdef CONFIG_SMP
48 struct task_struct *thread; 49 struct task_struct *thread;
49 bool should_run; 50 bool should_run;
51 bool rollback;
50 enum cpuhp_state cb_state; 52 enum cpuhp_state cb_state;
51 int (*cb)(unsigned int cpu); 53 int (*cb)(unsigned int cpu);
52 int result; 54 int result;
@@ -301,6 +303,11 @@ static int cpu_notify(unsigned long val, unsigned int cpu)
301 return __cpu_notify(val, cpu, -1, NULL); 303 return __cpu_notify(val, cpu, -1, NULL);
302} 304}
303 305
306static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
307{
308 BUG_ON(cpu_notify(val, cpu));
309}
310
304/* Notifier wrappers for transitioning to state machine */ 311/* Notifier wrappers for transitioning to state machine */
305static int notify_prepare(unsigned int cpu) 312static int notify_prepare(unsigned int cpu)
306{ 313{
@@ -477,6 +484,16 @@ static void cpuhp_thread_fun(unsigned int cpu)
477 } else { 484 } else {
478 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb); 485 ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
479 } 486 }
487 } else if (st->rollback) {
488 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
489
490 undo_cpu_down(cpu, st, cpuhp_ap_states);
491 /*
492 * This is a momentary workaround to keep the notifier users
493 * happy. Will go away once we got rid of the notifiers.
494 */
495 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
496 st->rollback = false;
480 } else { 497 } else {
481 /* Cannot happen .... */ 498 /* Cannot happen .... */
482 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE); 499 BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);
@@ -636,11 +653,6 @@ static inline void check_for_tasks(int dead_cpu)
636 read_unlock(&tasklist_lock); 653 read_unlock(&tasklist_lock);
637} 654}
638 655
639static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
640{
641 BUG_ON(cpu_notify(val, cpu));
642}
643
644static int notify_down_prepare(unsigned int cpu) 656static int notify_down_prepare(unsigned int cpu)
645{ 657{
646 int err, nr_calls = 0; 658 int err, nr_calls = 0;
@@ -721,9 +733,10 @@ static int takedown_cpu(unsigned int cpu)
721 */ 733 */
722 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu)); 734 err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
723 if (err) { 735 if (err) {
724 /* CPU didn't die: tell everyone. Can't complain. */ 736 /* CPU refused to die */
725 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
726 irq_unlock_sparse(); 737 irq_unlock_sparse();
738 /* Unpark the hotplug thread so we can rollback there */
739 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
727 return err; 740 return err;
728 } 741 }
729 BUG_ON(cpu_online(cpu)); 742 BUG_ON(cpu_online(cpu));
@@ -832,6 +845,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
832 * to do the further cleanups. 845 * to do the further cleanups.
833 */ 846 */
834 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target); 847 ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
848 if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
849 st->target = prev_state;
850 st->rollback = true;
851 cpuhp_kick_ap_work(cpu);
852 }
835 853
836 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; 854 hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
837out: 855out:
@@ -1249,6 +1267,7 @@ static struct cpuhp_step cpuhp_ap_states[] = {
1249 .name = "notify:online", 1267 .name = "notify:online",
1250 .startup = notify_online, 1268 .startup = notify_online,
1251 .teardown = notify_down_prepare, 1269 .teardown = notify_down_prepare,
1270 .skip_onerr = true,
1252 }, 1271 },
1253#endif 1272#endif
1254 /* 1273 /*
diff --git a/kernel/futex.c b/kernel/futex.c
index a5d2e74c89e0..c20f06f38ef3 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1295,10 +1295,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
1295 if (unlikely(should_fail_futex(true))) 1295 if (unlikely(should_fail_futex(true)))
1296 ret = -EFAULT; 1296 ret = -EFAULT;
1297 1297
1298 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) 1298 if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
1299 ret = -EFAULT; 1299 ret = -EFAULT;
1300 else if (curval != uval) 1300 } else if (curval != uval) {
1301 ret = -EINVAL; 1301 /*
1302 * If a unconditional UNLOCK_PI operation (user space did not
1303 * try the TID->0 transition) raced with a waiter setting the
1304 * FUTEX_WAITERS flag between get_user() and locking the hash
1305 * bucket lock, retry the operation.
1306 */
1307 if ((FUTEX_TID_MASK & curval) == uval)
1308 ret = -EAGAIN;
1309 else
1310 ret = -EINVAL;
1311 }
1302 if (ret) { 1312 if (ret) {
1303 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); 1313 raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
1304 return ret; 1314 return ret;
@@ -1525,8 +1535,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
1525 if (likely(&hb1->chain != &hb2->chain)) { 1535 if (likely(&hb1->chain != &hb2->chain)) {
1526 plist_del(&q->list, &hb1->chain); 1536 plist_del(&q->list, &hb1->chain);
1527 hb_waiters_dec(hb1); 1537 hb_waiters_dec(hb1);
1528 plist_add(&q->list, &hb2->chain);
1529 hb_waiters_inc(hb2); 1538 hb_waiters_inc(hb2);
1539 plist_add(&q->list, &hb2->chain);
1530 q->lock_ptr = &hb2->lock; 1540 q->lock_ptr = &hb2->lock;
1531 } 1541 }
1532 get_futex_key_refs(key2); 1542 get_futex_key_refs(key2);
@@ -2623,6 +2633,15 @@ retry:
2623 if (ret == -EFAULT) 2633 if (ret == -EFAULT)
2624 goto pi_faulted; 2634 goto pi_faulted;
2625 /* 2635 /*
2636 * A unconditional UNLOCK_PI op raced against a waiter
2637 * setting the FUTEX_WAITERS bit. Try again.
2638 */
2639 if (ret == -EAGAIN) {
2640 spin_unlock(&hb->lock);
2641 put_futex_key(&key);
2642 goto retry;
2643 }
2644 /*
2626 * wake_futex_pi has detected invalid state. Tell user 2645 * wake_futex_pi has detected invalid state. Tell user
2627 * space. 2646 * space.
2628 */ 2647 */
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index c37f34b00a11..14777af8e097 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -94,6 +94,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
94 data = irq_get_irq_data(virq + i); 94 data = irq_get_irq_data(virq + i);
95 cpumask_copy(data->common->affinity, dest); 95 cpumask_copy(data->common->affinity, dest);
96 data->common->ipi_offset = offset; 96 data->common->ipi_offset = offset;
97 irq_set_status_flags(virq + i, IRQ_NO_BALANCING);
97 } 98 }
98 return virq; 99 return virq;
99 100
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index eb2a2c9bc3fc..d734b7502001 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
@@ -136,10 +136,12 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
136 } 136 }
137 137
138 if (counter == qstat_pv_hash_hops) { 138 if (counter == qstat_pv_hash_hops) {
139 u64 frac; 139 u64 frac = 0;
140 140
141 frac = 100ULL * do_div(stat, kicks); 141 if (kicks) {
142 frac = DIV_ROUND_CLOSEST_ULL(frac, kicks); 142 frac = 100ULL * do_div(stat, kicks);
143 frac = DIV_ROUND_CLOSEST_ULL(frac, kicks);
144 }
143 145
144 /* 146 /*
145 * Return a X.XX decimal number 147 * Return a X.XX decimal number