 -rw-r--r--  kernel/sched/core.c    | 36 ++++++++++++++++++++++++++++++------
 -rw-r--r--  kernel/sched/cputime.c |  3 +++
 -rw-r--r--  kernel/sched/rt.c      |  2 +-
 -rw-r--r--  kernel/sched/sched.h   |  3 +++
 -rw-r--r--  kernel/sched/wait.c    | 16 ++++++++--------
 5 files changed, 45 insertions(+), 15 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4d568ac9319e..7063c6a07440 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1947,13 +1947,38 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
 
 #ifdef CONFIG_SMP
 	/*
+	 * Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
+	 * possible to, falsely, observe p->on_cpu == 0.
+	 *
+	 * One must be running (->on_cpu == 1) in order to remove oneself
+	 * from the runqueue.
+	 *
+	 *  [S] ->on_cpu = 1;	[L] ->on_rq
+	 *      UNLOCK rq->lock
+	 *			RMB
+	 *      LOCK   rq->lock
+	 *  [S] ->on_rq = 0;	[L] ->on_cpu
+	 *
+	 * Pairs with the full barrier implied in the UNLOCK+LOCK on rq->lock
+	 * from the consecutive calls to schedule(); the first switching to our
+	 * task, the second putting it to sleep.
+	 */
+	smp_rmb();
+
+	/*
 	 * If the owning (remote) cpu is still in the middle of schedule() with
 	 * this task as prev, wait until its done referencing the task.
 	 */
 	while (p->on_cpu)
 		cpu_relax();
 	/*
-	 * Pairs with the smp_wmb() in finish_lock_switch().
+	 * Combined with the control dependency above, we have an effective
+	 * smp_load_acquire() without the need for full barriers.
+	 *
+	 * Pairs with the smp_store_release() in finish_lock_switch().
+	 *
+	 * This ensures that tasks getting woken will be fully ordered against
+	 * their previous state and preserve Program Order.
 	 */
 	smp_rmb();
 
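The spin on ->on_cpu plus the second smp_rmb() form an acquire-style read that pairs with the release store in finish_lock_switch(). A minimal userspace analogue in C11 atomics — not kernel code; the names on_cpu, saved_state, finish_switch and waker are all illustrative — shows the same pairing of a relaxed spin and an acquire fence against a release store:

	#include <stdatomic.h>
	#include <stdio.h>
	#include <threads.h>

	static atomic_int on_cpu = ATOMIC_VAR_INIT(1);	/* stand-in for p->on_cpu */
	static int saved_state;				/* data published before the release */

	/* Switch-out side: analogue of finish_lock_switch(). */
	static int finish_switch(void *unused)
	{
		saved_state = 42;	/* must be visible once on_cpu == 0 */
		atomic_store_explicit(&on_cpu, 0, memory_order_release);
		return 0;
	}

	/* Waker side: analogue of while (p->on_cpu) cpu_relax(); smp_rmb(); */
	static int waker(void)
	{
		while (atomic_load_explicit(&on_cpu, memory_order_relaxed))
			;	/* control dependency on the spinning load */
		atomic_thread_fence(memory_order_acquire);	/* analogue of smp_rmb() */
		return saved_state;	/* guaranteed to observe 42 */
	}

	int main(void)
	{
		thrd_t t;

		thrd_create(&t, finish_switch, NULL);
		printf("%d\n", waker());	/* always prints 42 */
		thrd_join(t, NULL);
		return 0;
	}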
@@ -2039,7 +2064,6 @@ out:
  */
 int wake_up_process(struct task_struct *p)
 {
-	WARN_ON(task_is_stopped_or_traced(p));
 	return try_to_wake_up(p, TASK_NORMAL, 0);
 }
 EXPORT_SYMBOL(wake_up_process);
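The dropped WARN_ON() had asserted that stopped or traced tasks never reach here. For reference, wake_up_process() is normally paired with a sleeper that publishes its own state before sleeping; a minimal kthread-style sketch (worker, worker_fn and start_worker are illustrative names, not from this patch):

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static struct task_struct *worker;

	static int worker_fn(void *unused)
	{
		while (!kthread_should_stop()) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (!kthread_should_stop())
				schedule();	/* sleep until woken */
			__set_current_state(TASK_RUNNING);
			/* ... process pending work ... */
		}
		return 0;
	}

	/* kthread_create() leaves the new thread asleep until
	 * wake_up_process() releases it. */
	static int start_worker(void)
	{
		worker = kthread_create(worker_fn, NULL, "worker");
		if (IS_ERR(worker))
			return PTR_ERR(worker);
		wake_up_process(worker);
		return 0;
	}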
@@ -5847,13 +5871,13 @@ static int init_rootdomain(struct root_domain *rd)
 {
 	memset(rd, 0, sizeof(*rd));
 
-	if (!alloc_cpumask_var(&rd->span, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
 		goto out;
-	if (!alloc_cpumask_var(&rd->online, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL))
 		goto free_span;
-	if (!alloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->dlo_mask, GFP_KERNEL))
 		goto free_online;
-	if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
+	if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
 		goto free_dlo_mask;
 
 	init_dl_bw(&rd->dl_bw);
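zalloc_cpumask_var() is alloc_cpumask_var() plus a clear of the mask. The distinction only bites with CONFIG_CPUMASK_OFFSTACK=y, where the mask lives in separately allocated, otherwise uninitialized memory; with it off, the embedded masks were already zeroed by the memset() above. A short usage sketch:

	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* All bits are guaranteed clear here, even with
	 * CONFIG_CPUMASK_OFFSTACK=y, where plain alloc_cpumask_var()
	 * would have returned uninitialized storage. */
	cpumask_set_cpu(raw_smp_processor_id(), mask);

	free_cpumask_var(mask);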
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 26a54461bf59..05de80b48586 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -788,6 +788,9 @@ cputime_t task_gtime(struct task_struct *t)
 	unsigned int seq;
 	cputime_t gtime;
 
+	if (!context_tracking_is_enabled())
+		return t->gtime;
+
 	do {
 		seq = read_seqbegin(&t->vtime_seqlock);
 
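The new fast path skips the retry loop entirely when context tracking (and with it vtime accounting) is compiled out or disabled, in which case t->gtime is authoritative on its own. The loop that follows is the standard seqlock reader pattern; schematically (the accounting inside the loop is elided by the hunk):

	unsigned int seq;
	cputime_t gtime;

	do {
		seq = read_seqbegin(&t->vtime_seqlock);	/* snapshot writer generation */
		gtime = t->gtime;
		/* ... fold in any in-flight guest-time delta ... */
	} while (read_seqretry(&t->vtime_seqlock, seq));	/* retry if a writer raced */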
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e3cc16312046..8ec86abe0ea1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -64,7 +64,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
 	raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
 static void push_irq_work_func(struct irq_work *work);
 #endif
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index efd3bfc7e347..b242775bf670 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1073,6 +1073,9 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 	 * We must ensure this doesn't happen until the switch is completely
 	 * finished.
 	 *
+	 * In particular, the load of prev->state in finish_task_switch() must
+	 * happen before this.
+	 *
 	 * Pairs with the control dependency and rmb in try_to_wake_up().
 	 */
 	smp_store_release(&prev->on_cpu, 0);
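This is the release side of the pairing documented in the try_to_wake_up() hunk above: everything before the store, including the prev->state load in finish_task_switch(), is ordered before ->on_cpu becoming 0. In the [S]/[L] notation of that comment, roughly:

	CPU doing the switch			CPU doing the wakeup

	[L] prev->state
	    (finish_task_switch)
	smp_store_release(&prev->on_cpu, 0)
						[L] p->on_cpu == 0	(spin exits)
						smp_rmb()
						/* everything ordered before the
						   release is now visible */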
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 052e02672d12..f10bd873e684 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -583,18 +583,18 @@ EXPORT_SYMBOL(wake_up_atomic_t);
 
 __sched int bit_wait(struct wait_bit_key *word)
 {
-	if (signal_pending_state(current->state, current))
-		return 1;
 	schedule();
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait);
 
 __sched int bit_wait_io(struct wait_bit_key *word)
 {
-	if (signal_pending_state(current->state, current))
-		return 1;
 	io_schedule();
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);
@@ -602,11 +602,11 @@ EXPORT_SYMBOL(bit_wait_io);
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
 	unsigned long now = READ_ONCE(jiffies);
-	if (signal_pending_state(current->state, current))
-		return 1;
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	schedule_timeout(word->timeout - now);
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);
@@ -614,11 +614,11 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
 	unsigned long now = READ_ONCE(jiffies);
-	if (signal_pending_state(current->state, current))
-		return 1;
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	io_schedule_timeout(word->timeout - now);
+	if (signal_pending(current))
+		return -EINTR;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
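All four helpers are action callbacks for the wait_on_bit() family, which propagates a nonzero action return to its caller; across these hunks the signal check moves to after the sleep and a signal now surfaces as -EINTR rather than a bare 1. A hedged caller-side sketch (flags, MY_BUSY_BIT and wait_until_idle are illustrative, not from this patch):

	static unsigned long flags;	/* MY_BUSY_BIT set while the object is busy */

	static int wait_until_idle(void)
	{
		int err;

		/* Sleeps interruptibly via bit_wait(); a pending signal
		 * is reported here as -EINTR. */
		err = wait_on_bit(&flags, MY_BUSY_BIT, TASK_INTERRUPTIBLE);
		if (err)
			return err;

		/* MY_BUSY_BIT observed clear. */
		return 0;
	}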
