Diffstat (limited to 'kernel/sched/core.c')
 -rw-r--r--  kernel/sched/core.c | 92
 1 file changed, 64 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 123673291ffb..20b858f2db22 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -511,7 +511,7 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 static bool set_nr_if_polling(struct task_struct *p)
 {
         struct thread_info *ti = task_thread_info(p);
-        typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+        typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 
         for (;;) {
                 if (!(val & _TIF_POLLING_NRFLAG))
@@ -541,6 +541,52 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+        struct wake_q_node *node = &task->wake_q;
+
+        /*
+         * Atomically grab the task, if ->wake_q is !nil already it means
+         * its already queued (either by us or someone else) and will get the
+         * wakeup due to that.
+         *
+         * This cmpxchg() implies a full barrier, which pairs with the write
+         * barrier implied by the wakeup in wake_up_list().
+         */
+        if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+                return;
+
+        get_task_struct(task);
+
+        /*
+         * The head is context local, there can be no concurrency.
+         */
+        *head->lastp = node;
+        head->lastp = &node->next;
+}
+
+void wake_up_q(struct wake_q_head *head)
+{
+        struct wake_q_node *node = head->first;
+
+        while (node != WAKE_Q_TAIL) {
+                struct task_struct *task;
+
+                task = container_of(node, struct task_struct, wake_q);
+                BUG_ON(!task);
+                /* task can safely be re-inserted now */
+                node = node->next;
+                task->wake_q.next = NULL;
+
+                /*
+                 * wake_up_process() implies a wmb() to pair with the queueing
+                 * in wake_q_add() so as not to miss wakeups.
+                 */
+                wake_up_process(task);
+                put_task_struct(task);
+        }
+}
+
 /*
  * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
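Note: the two functions added above form the lockless wake-queue API: wake_q_add() queues a task for a later wakeup (typically while a spinlock is still held) and wake_up_q() performs the actual wakeups once the lock has been dropped. A minimal usage sketch follows; my_lock, my_waiters and struct my_waiter are made-up illustration names, and the open-coded wake_q_head initialization stands in for whatever initializer the matching <linux/sched.h> change provides:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_lock);                /* hypothetical lock */
static LIST_HEAD(my_waiters);                   /* hypothetical waiter list */

struct my_waiter {                              /* hypothetical waiter record */
        struct list_head list;
        struct task_struct *task;
};

static void my_wake_all(void)
{
        struct wake_q_head wake_q;
        struct my_waiter *w;

        /* Empty queue: first holds the tail sentinel, lastp points at first. */
        wake_q.first = WAKE_Q_TAIL;
        wake_q.lastp = &wake_q.first;

        spin_lock(&my_lock);
        list_for_each_entry(w, &my_waiters, list)
                wake_q_add(&wake_q, w->task);   /* only queues, no wakeup yet */
        spin_unlock(&my_lock);

        /* Wakeups (and the matching put_task_struct) happen lock-free here. */
        wake_up_q(&wake_q);
}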
@@ -2397,9 +2443,9 @@ unsigned long nr_iowait_cpu(int cpu)
 
 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
 {
-        struct rq *this = this_rq();
-        *nr_waiters = atomic_read(&this->nr_iowait);
-        *load = this->cpu_load[0];
+        struct rq *rq = this_rq();
+        *nr_waiters = atomic_read(&rq->nr_iowait);
+        *load = rq->load.weight;
 }
 
 #ifdef CONFIG_SMP
@@ -2497,6 +2543,7 @@ void scheduler_tick(void)
         update_rq_clock(rq);
         curr->sched_class->task_tick(rq, curr, 0);
         update_cpu_load_active(rq);
+        calc_global_load_tick(rq);
         raw_spin_unlock(&rq->lock);
 
         perf_event_task_tick();
@@ -2525,7 +2572,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
         struct rq *rq = this_rq();
-        unsigned long next, now = ACCESS_ONCE(jiffies);
+        unsigned long next, now = READ_ONCE(jiffies);
 
         next = rq->last_sched_tick + HZ;
 
@@ -2726,9 +2773,7 @@ again:
  * - return from syscall or exception to user-space
  * - return from interrupt-handler to user-space
  *
- * WARNING: all callers must re-check need_resched() afterward and reschedule
- * accordingly in case an event triggered the need for rescheduling (such as
- * an interrupt waking up a task) while preemption was disabled in __schedule().
+ * WARNING: must be called with preemption disabled!
  */
 static void __sched __schedule(void)
 {
@@ -2737,7 +2782,6 @@ static void __sched __schedule(void)
         struct rq *rq;
         int cpu;
 
-        preempt_disable();
         cpu = smp_processor_id();
         rq = cpu_rq(cpu);
         rcu_note_context_switch();
@@ -2801,8 +2845,6 @@ static void __sched __schedule(void)
         raw_spin_unlock_irq(&rq->lock);
 
         post_schedule(rq);
-
-        sched_preempt_enable_no_resched();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2823,7 +2865,9 @@ asmlinkage __visible void __sched schedule(void)
 
         sched_submit_work(tsk);
         do {
+                preempt_disable();
                 __schedule();
+                sched_preempt_enable_no_resched();
         } while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
@@ -2862,15 +2906,14 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
         do {
-                __preempt_count_add(PREEMPT_ACTIVE);
+                preempt_active_enter();
                 __schedule();
-                __preempt_count_sub(PREEMPT_ACTIVE);
+                preempt_active_exit();
 
                 /*
                  * Check again in case we missed a preemption opportunity
                  * between schedule and now.
                  */
-                barrier();
         } while (need_resched());
 }
 
@@ -2917,7 +2960,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
                 return;
 
         do {
-                __preempt_count_add(PREEMPT_ACTIVE);
+                preempt_active_enter();
                 /*
                  * Needs preempt disabled in case user_exit() is traced
                  * and the tracer calls preempt_enable_notrace() causing
@@ -2927,8 +2970,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
                 __schedule();
                 exception_exit(prev_ctx);
 
-                __preempt_count_sub(PREEMPT_ACTIVE);
-                barrier();
+                preempt_active_exit();
         } while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_context);
@@ -2952,17 +2994,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
         prev_state = exception_enter();
 
         do {
-                __preempt_count_add(PREEMPT_ACTIVE);
+                preempt_active_enter();
                 local_irq_enable();
                 __schedule();
                 local_irq_disable();
-                __preempt_count_sub(PREEMPT_ACTIVE);
-
-                /*
-                 * Check again in case we missed a preemption opportunity
-                 * between schedule and now.
-                 */
-                barrier();
+                preempt_active_exit();
         } while (need_resched());
 
         exception_exit(prev_state);
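Note: the preempt_active_enter()/preempt_active_exit() helpers used in the hunks above are defined outside this file, in the matching <linux/preempt.h> change (not shown here). Inferred purely from the open-coded sequences they replace, they bundle the PREEMPT_ACTIVE count manipulation with the compiler barrier that used to follow the decrement, roughly as sketched below; the real definitions may differ, in particular they likely also account for the preempt_disable() that __schedule() no longer performs itself:

/*
 * Sketch only, inferred from the removed open-coded sequences; not the
 * actual <linux/preempt.h> definitions introduced alongside this patch.
 */
#define preempt_active_enter_sketch()                           \
do {                                                            \
        __preempt_count_add(PREEMPT_ACTIVE);                    \
        barrier();      /* keep the count bump ordered */       \
} while (0)

#define preempt_active_exit_sketch()                            \
do {                                                            \
        barrier();      /* force need_resched() re-read */      \
        __preempt_count_sub(PREEMPT_ACTIVE);                    \
} while (0)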
@@ -5314,7 +5350,7 @@ static struct notifier_block migration_notifier = {
         .priority = CPU_PRI_MIGRATION,
 };
 
-static void __cpuinit set_cpu_rq_start_time(void)
+static void set_cpu_rq_start_time(void)
 {
         int cpu = smp_processor_id();
         struct rq *rq = cpu_rq(cpu);
@@ -7734,11 +7770,11 @@ static long sched_group_rt_runtime(struct task_group *tg)
         return rt_runtime_us;
 }
 
-static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
+static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
 {
         u64 rt_runtime, rt_period;
 
-        rt_period = (u64)rt_period_us * NSEC_PER_USEC;
+        rt_period = rt_period_us * NSEC_PER_USEC;
         rt_runtime = tg->rt_bandwidth.rt_runtime;
 
         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);