author     Linus Torvalds <torvalds@linux-foundation.org>  2016-05-25 20:11:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2016-05-25 20:11:43 -0400
commit     f89eae4ee7e075e576bd4b4d2db901023421a3be (patch)
tree       8dbd69cdc48c15f6b623e882b31092b7c451803f
parent     bdc6b758e443c21c39a14c075e5b7e01f095b37b (diff)
parent     b7e7ade34e6188bee2e3b0d42b51d25137d9e2a5 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Two fixes: one for a lost wakeup, the other to fix the compiler
  optimizing out preempt operations on ARM64 (and possibly other
  non-x86 architectures)"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/core: Fix remote wakeups
  sched/preempt: Fix preempt_count manipulations
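For the preempt_count fix, the problem class is that the generic accessors used plain loads and stores, so a sufficiently aggressive compiler could merge or drop matched preempt_count updates entirely (observed on ARM64 per the message above). The stand-alone sketch below is only an analogue of that folding, with invented names rather than kernel code: built with optimizations, plain_pair() can legally be reduced to nothing, while volatile_pair() must perform every access.

/* Illustrative analogue only; every name here is invented, not kernel code. */
struct fake_thread_info {
	int preempt_count;
};

static struct fake_thread_info fake_ti;

static inline int *plain_count_ptr(void)
{
	return &fake_ti.preempt_count;
}

static inline volatile int *volatile_count_ptr(void)
{
	return &fake_ti.preempt_count;
}

void plain_pair(void)
{
	(*plain_count_ptr())++;		/* analogue of a preempt_count increment */
	(*plain_count_ptr())--;		/* ...and the matching decrement         */
}					/* at -O2 this whole pair may fold away  */

void volatile_pair(void)
{
	(*volatile_count_ptr())++;	/* volatile: each access must hit memory */
	(*volatile_count_ptr())--;
}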
-rw-r--r--  include/asm-generic/preempt.h   4
-rw-r--r--  include/linux/sched.h           1
-rw-r--r--  kernel/sched/core.c            18
3 files changed, 14 insertions, 9 deletions
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index 5d8ffa3e6f8c..c1cde3577551 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -7,10 +7,10 @@
 
 static __always_inline int preempt_count(void)
 {
-	return current_thread_info()->preempt_count;
+	return READ_ONCE(current_thread_info()->preempt_count);
 }
 
-static __always_inline int *preempt_count_ptr(void)
+static __always_inline volatile int *preempt_count_ptr(void)
 {
 	return &current_thread_info()->preempt_count;
 }
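The volatile return type matters because of the callers elsewhere in this same header: the generic add/sub helpers dereference preempt_count_ptr() directly, so a volatile pointee turns each += and -= into loads and stores the compiler is not allowed to elide or combine. From memory, those helpers look roughly like the following (a sketch of asm-generic/preempt.h of this era, not a verbatim quote):

static __always_inline void __preempt_count_add(int val)
{
	*preempt_count_ptr() += val;
}

static __always_inline void __preempt_count_sub(int val)
{
	*preempt_count_ptr() -= val;
}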
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 21c26e78aec5..23e075dcdfe4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1539,6 +1539,7 @@ struct task_struct {
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
 	unsigned sched_migrated:1;
+	unsigned sched_remote_wakeup:1;
 	unsigned :0; /* force alignment to the next boundary */
 
 	/* unserialized, strictly 'current' */
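On placement: the new bit is added before the zero-width bitfield, and in C an unnamed zero-width bitfield ends the current allocation unit, so the "unserialized, strictly 'current'" bits that follow start in a fresh storage unit and their non-atomic read-modify-write updates cannot overlap with updates to this group. A minimal, self-contained illustration of that zero-width-bitfield behaviour (hypothetical structs, not kernel code):

/* Hypothetical illustration of a zero-width bitfield, not kernel code. */
#include <stdio.h>

struct packed_flags {
	unsigned a:1;
	unsigned b:1;
	unsigned c:1;		/* a, b, c share one storage unit          */
};

struct split_flags {
	unsigned a:1;
	unsigned b:1;
	unsigned :0;		/* close the current unit...               */
	unsigned c:1;		/* ...so c starts in a fresh unsigned int  */
};

int main(void)
{
	printf("packed: %zu bytes, split: %zu bytes\n",
	       sizeof(struct packed_flags), sizeof(struct split_flags));
	return 0;
}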
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 404c0784b1fc..7f2cae4620c7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1768,13 +1768,15 @@ void sched_ttwu_pending(void)
 	cookie = lockdep_pin_lock(&rq->lock);
 
 	while (llist) {
+		int wake_flags = 0;
+
 		p = llist_entry(llist, struct task_struct, wake_entry);
 		llist = llist_next(llist);
-		/*
-		 * See ttwu_queue(); we only call ttwu_queue_remote() when
-		 * its a x-cpu wakeup.
-		 */
-		ttwu_do_activate(rq, p, WF_MIGRATED, cookie);
+
+		if (p->sched_remote_wakeup)
+			wake_flags = WF_MIGRATED;
+
+		ttwu_do_activate(rq, p, wake_flags, cookie);
 	}
 
 	lockdep_unpin_lock(&rq->lock, cookie);
@@ -1819,10 +1821,12 @@ void scheduler_ipi(void)
 	irq_exit();
 }
 
-static void ttwu_queue_remote(struct task_struct *p, int cpu)
+static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
 {
 	struct rq *rq = cpu_rq(cpu);
 
+	p->sched_remote_wakeup = !!(wake_flags & WF_MIGRATED);
+
 	if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) {
 		if (!set_nr_if_polling(rq->idle))
 			smp_send_reschedule(cpu);
@@ -1869,7 +1873,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 #if defined(CONFIG_SMP)
 	if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
 		sched_clock_cpu(cpu); /* sync clocks x-cpu */
-		ttwu_queue_remote(p, cpu);
+		ttwu_queue_remote(p, cpu, wake_flags);
 		return;
 	}
 #endif
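Taken together, the wakeup fix is a round trip of a single bit: ttwu_queue() now forwards the real wake_flags, ttwu_queue_remote() latches WF_MIGRATED into p->sched_remote_wakeup before the task goes onto the remote CPU's wake_list, and sched_ttwu_pending() on that CPU rebuilds the flag rather than assuming WF_MIGRATED for every queued wakeup, which, per the pull message, is the fix for the lost wakeup. The stand-alone model below mirrors that hand-off in miniature; every type and name in it is invented for illustration and none of it is the kernel's code:

/* Simplified stand-alone model; all names are invented for illustration. */
#include <stdbool.h>
#include <stdio.h>

#define WF_MIGRATED	0x01	/* stand-in for the scheduler's wake flag */

struct fake_task {
	bool remote_wakeup;	/* plays the role of p->sched_remote_wakeup */
};

/* Producer side: record whether this queued wakeup was a migration. */
static void queue_remote(struct fake_task *p, int wake_flags)
{
	p->remote_wakeup = !!(wake_flags & WF_MIGRATED);
	/* ...the real code then adds p to the remote CPU's wake_list... */
}

/* Consumer side: rebuild the flags instead of assuming WF_MIGRATED. */
static void drain_pending(struct fake_task *p)
{
	int wake_flags = p->remote_wakeup ? WF_MIGRATED : 0;

	printf("activating task with wake_flags=%#x\n", wake_flags);
}

int main(void)
{
	struct fake_task a = { 0 }, b = { 0 };

	queue_remote(&a, WF_MIGRATED);	/* cross-CPU wakeup with migration */
	queue_remote(&b, 0);		/* queued wakeup without migration */
	drain_pending(&a);
	drain_pending(&b);
	return 0;
}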