Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  214
1 files changed, 119 insertions, 95 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index fde6ff903525..ccacdbdecf45 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,9 @@
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
 #include <asm/mutex.h>
+#ifdef CONFIG_PARAVIRT
+#include <asm/paravirt.h>
+#endif
 
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
@@ -124,7 +127,7 @@
 
 static inline int rt_policy(int policy)
 {
-	if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
 		return 1;
 	return 0;
 }
@@ -422,6 +425,7 @@ struct rt_rq {
  */
 struct root_domain {
 	atomic_t refcount;
+	atomic_t rto_count;
 	struct rcu_head rcu;
 	cpumask_var_t span;
 	cpumask_var_t online;
@@ -431,7 +435,6 @@ struct root_domain {
 	 * one runnable RT task.
 	 */
 	cpumask_var_t rto_mask;
-	atomic_t rto_count;
 	struct cpupri cpupri;
 };
 
@@ -528,6 +531,12 @@ struct rq {
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	u64 prev_irq_time;
 #endif
+#ifdef CONFIG_PARAVIRT
+	u64 prev_steal_time;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	u64 prev_steal_time_rq;
+#endif
 
 	/* calc_load related fields */
 	unsigned long calc_load_update;
@@ -581,7 +590,6 @@ static inline int cpu_of(struct rq *rq)
 
 #define rcu_dereference_check_sched_domain(p) \
 	rcu_dereference_check((p), \
-			rcu_read_lock_held() || \
 			lockdep_is_held(&sched_domains_mutex))
 
 /*
@@ -1568,38 +1576,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 	return rq->avg_load_per_task;
 }
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
-
-/*
- * Compute the cpu's hierarchical load factor for each task group.
- * This needs to be done in a top-down fashion because the load of a child
- * group is a fraction of its parents load.
- */
-static int tg_load_down(struct task_group *tg, void *data)
-{
-	unsigned long load;
-	long cpu = (long)data;
-
-	if (!tg->parent) {
-		load = cpu_rq(cpu)->load.weight;
-	} else {
-		load = tg->parent->cfs_rq[cpu]->h_load;
-		load *= tg->se[cpu]->load.weight;
-		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
-	}
-
-	tg->cfs_rq[cpu]->h_load = load;
-
-	return 0;
-}
-
-static void update_h_load(long cpu)
-{
-	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
-}
-
-#endif
-
 #ifdef CONFIG_PREEMPT
 
 static void double_rq_lock(struct rq *rq1, struct rq *rq2);
@@ -1953,10 +1929,28 @@ void account_system_vtime(struct task_struct *curr)
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
-static void update_rq_clock_task(struct rq *rq, s64 delta)
+#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+#ifdef CONFIG_PARAVIRT
+static inline u64 steal_ticks(u64 steal)
 {
-	s64 irq_delta;
+	if (unlikely(steal > NSEC_PER_SEC))
+		return div_u64(steal, TICK_NSEC);
+
+	return __iter_div_u64_rem(steal, TICK_NSEC, &steal);
+}
+#endif
 
+static void update_rq_clock_task(struct rq *rq, s64 delta)
+{
+/*
+ * In theory, the compile should just see 0 here, and optimize out the call
+ * to sched_rt_avg_update. But I don't trust it...
+ */
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	s64 steal = 0, irq_delta = 0;
+#endif
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time;
 
 	/*
@@ -1979,12 +1973,35 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
 
 	rq->prev_irq_time += irq_delta;
 	delta -= irq_delta;
+#endif
+#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+	if (static_branch((&paravirt_steal_rq_enabled))) {
+		u64 st;
+
+		steal = paravirt_steal_clock(cpu_of(rq));
+		steal -= rq->prev_steal_time_rq;
+
+		if (unlikely(steal > delta))
+			steal = delta;
+
+		st = steal_ticks(steal);
+		steal = st * TICK_NSEC;
+
+		rq->prev_steal_time_rq += steal;
+
+		delta -= steal;
+	}
+#endif
+
 	rq->clock_task += delta;
 
-	if (irq_delta && sched_feat(NONIRQ_POWER))
-		sched_rt_avg_update(rq, irq_delta);
+#if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING)
+	if ((irq_delta + steal) && sched_feat(NONTASK_POWER))
+		sched_rt_avg_update(rq, irq_delta + steal);
+#endif
 }
 
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
 static int irqtime_account_hi_update(void)
 {
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
@@ -2019,12 +2036,7 @@ static int irqtime_account_si_update(void)
 
 #define sched_clock_irqtime (0)
 
-static void update_rq_clock_task(struct rq *rq, s64 delta)
-{
-	rq->clock_task += delta;
-}
-
-#endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+#endif
 
 #include "sched_idletask.c"
 #include "sched_fair.c"
@@ -2220,7 +2232,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 
 	if (task_cpu(p) != new_cpu) {
 		p->se.nr_migrations++;
-		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
+		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
 	}
 
 	__set_task_cpu(p, new_cpu);
@@ -2497,7 +2509,7 @@ ttwu_do_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
 	if (p->sched_class->task_woken)
 		p->sched_class->task_woken(rq, p);
 
-	if (unlikely(rq->idle_stamp)) {
+	if (rq->idle_stamp) {
 		u64 delta = rq->clock - rq->idle_stamp;
 		u64 max = 2*sysctl_sched_migration_cost;
 
@@ -2886,7 +2898,7 @@ void sched_fork(struct task_struct *p)
 #if defined(CONFIG_SMP)
 	p->on_cpu = 0;
 #endif
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
 #endif
@@ -3877,6 +3889,25 @@ void account_idle_time(cputime_t cputime)
 		cpustat->idle = cputime64_add(cpustat->idle, cputime64);
 }
 
+static __always_inline bool steal_account_process_tick(void)
+{
+#ifdef CONFIG_PARAVIRT
+	if (static_branch(&paravirt_steal_enabled)) {
+		u64 steal, st = 0;
+
+		steal = paravirt_steal_clock(smp_processor_id());
+		steal -= this_rq()->prev_steal_time;
+
+		st = steal_ticks(steal);
+		this_rq()->prev_steal_time += st * TICK_NSEC;
+
+		account_steal_time(st);
+		return st;
+	}
+#endif
+	return false;
+}
+
 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -3908,6 +3939,9 @@ static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
 	cputime64_t tmp = cputime_to_cputime64(cputime_one_jiffy);
 	struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 
+	if (steal_account_process_tick())
+		return;
+
 	if (irqtime_account_hi_update()) {
 		cpustat->irq = cputime64_add(cpustat->irq, tmp);
 	} else if (irqtime_account_si_update()) {
@@ -3961,6 +3995,9 @@ void account_process_tick(struct task_struct *p, int user_tick)
 		return;
 	}
 
+	if (steal_account_process_tick())
+		return;
+
 	if (user_tick)
 		account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
 	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
@@ -4338,11 +4375,8 @@ EXPORT_SYMBOL(schedule);
 
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 {
-	bool ret = false;
-
-	rcu_read_lock();
 	if (lock->owner != owner)
-		goto fail;
+		return false;
 
 	/*
 	 * Ensure we emit the owner->on_cpu, dereference _after_ checking
@@ -4352,11 +4386,7 @@ static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
 	 */
 	barrier();
 
-	ret = owner->on_cpu;
-fail:
-	rcu_read_unlock();
-
-	return ret;
+	return owner->on_cpu;
 }
 
 /*
@@ -4368,21 +4398,21 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 	if (!sched_feat(OWNER_SPIN))
 		return 0;
 
+	rcu_read_lock();
 	while (owner_running(lock, owner)) {
 		if (need_resched())
-			return 0;
+			break;
 
 		arch_mutex_cpu_relax();
 	}
+	rcu_read_unlock();
 
 	/*
-	 * If the owner changed to another task there is likely
-	 * heavy contention, stop spinning.
+	 * We break out the loop above on need_resched() and when the
+	 * owner changed, which is a sign for heavy contention. Return
+	 * success only when lock->owner is NULL.
 	 */
-	if (lock->owner)
-		return 0;
-
-	return 1;
+	return lock->owner == NULL;
 }
 #endif
 
@@ -7898,17 +7928,10 @@ int in_sched_functions(unsigned long addr)
 		&& addr < (unsigned long)__sched_text_end);
 }
 
-static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
+static void init_cfs_rq(struct cfs_rq *cfs_rq)
 {
 	cfs_rq->tasks_timeline = RB_ROOT;
 	INIT_LIST_HEAD(&cfs_rq->tasks);
-#ifdef CONFIG_FAIR_GROUP_SCHED
-	cfs_rq->rq = rq;
-	/* allow initial update_cfs_load() to truncate */
-#ifdef CONFIG_SMP
-	cfs_rq->load_stamp = 1;
-#endif
-#endif
 	cfs_rq->min_vruntime = (u64)(-(1LL << 20));
 #ifndef CONFIG_64BIT
 	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
@@ -7928,27 +7951,18 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 	/* delimiter for bitsearch: */
 	__set_bit(MAX_RT_PRIO, array->bitmap);
 
-#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
+#if defined CONFIG_SMP
 	rt_rq->highest_prio.curr = MAX_RT_PRIO;
-#ifdef CONFIG_SMP
 	rt_rq->highest_prio.next = MAX_RT_PRIO;
-#endif
-#endif
-#ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
-	plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
+	plist_head_init(&rt_rq->pushable_tasks);
 #endif
 
 	rt_rq->rt_time = 0;
 	rt_rq->rt_throttled = 0;
 	rt_rq->rt_runtime = 0;
 	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
-
-#ifdef CONFIG_RT_GROUP_SCHED
-	rt_rq->rt_nr_boosted = 0;
-	rt_rq->rq = rq;
-#endif
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -7957,11 +7971,17 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 				struct sched_entity *parent)
 {
 	struct rq *rq = cpu_rq(cpu);
-	tg->cfs_rq[cpu] = cfs_rq;
-	init_cfs_rq(cfs_rq, rq);
+
 	cfs_rq->tg = tg;
+	cfs_rq->rq = rq;
+#ifdef CONFIG_SMP
+	/* allow initial update_cfs_load() to truncate */
+	cfs_rq->load_stamp = 1;
+#endif
 
+	tg->cfs_rq[cpu] = cfs_rq;
 	tg->se[cpu] = se;
+
 	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
@@ -7984,12 +8004,14 @@ static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
 {
 	struct rq *rq = cpu_rq(cpu);
 
-	tg->rt_rq[cpu] = rt_rq;
-	init_rt_rq(rt_rq, rq);
+	rt_rq->highest_prio.curr = MAX_RT_PRIO;
+	rt_rq->rt_nr_boosted = 0;
+	rt_rq->rq = rq;
 	rt_rq->tg = tg;
-	rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 
+	tg->rt_rq[cpu] = rt_rq;
 	tg->rt_se[cpu] = rt_se;
+
 	if (!rt_se)
 		return;
 
@@ -8071,7 +8093,7 @@ void __init sched_init(void)
 		rq->nr_running = 0;
 		rq->calc_load_active = 0;
 		rq->calc_load_update = jiffies + LOAD_FREQ;
-		init_cfs_rq(&rq->cfs, rq);
+		init_cfs_rq(&rq->cfs);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 		root_task_group.shares = root_task_group_load;
@@ -8142,7 +8164,7 @@ void __init sched_init(void)
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
+	plist_head_init(&init_task.pi_waiters);
 #endif
 
 	/*
@@ -8185,7 +8207,7 @@ void __init sched_init(void)
 	scheduler_running = 1;
 }
 
-#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline int preempt_count_equals(int preempt_offset)
 {
 	int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
@@ -8195,7 +8217,6 @@ static inline int preempt_count_equals(int preempt_offset)
 
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
-#ifdef in_atomic
 	static unsigned long prev_jiffy; /* ratelimiting */
 
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
@@ -8217,7 +8238,6 @@ void __might_sleep(const char *file, int line, int preempt_offset)
 	if (irqs_disabled())
 		print_irqtrace_events(current);
 	dump_stack();
-#endif
 }
 EXPORT_SYMBOL(__might_sleep);
 #endif
@@ -8376,6 +8396,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!se)
 			goto err_free_rq;
 
+		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
 	}
 
@@ -8403,7 +8424,7 @@ static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
 	list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
-#else /* !CONFG_FAIR_GROUP_SCHED */
+#else /* !CONFIG_FAIR_GROUP_SCHED */
 static inline void free_fair_sched_group(struct task_group *tg)
 {
 }
@@ -8424,7 +8445,8 @@ static void free_rt_sched_group(struct task_group *tg)
 {
 	int i;
 
-	destroy_rt_bandwidth(&tg->rt_bandwidth);
+	if (tg->rt_se)
+		destroy_rt_bandwidth(&tg->rt_bandwidth);
 
 	for_each_possible_cpu(i) {
 		if (tg->rt_rq)
@@ -8465,6 +8487,8 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 		if (!rt_se)
 			goto err_free_rq;
 
+		init_rt_rq(rt_rq, cpu_rq(i));
+		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
 		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
 	}
 
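The steal-time bookkeeping added above (steal_ticks() plus the prev_steal_time counters) follows one pattern: convert the raw nanosecond steal clock into whole ticks, account only those ticks, and carry the sub-tick remainder forward. Below is a minimal standalone C sketch of that pattern, not part of the patch, with assumed values: a TICK_NSEC of 1,000,000 (HZ=1000), made-up clock readings, and a hypothetical account_steal() helper.

#include <stdio.h>
#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* assumed 1 ms tick (HZ=1000) */

static uint64_t prev_steal_time;	/* ns already accounted as whole ticks */

/* Hypothetical helper mirroring the arithmetic in steal_account_process_tick(). */
static uint64_t account_steal(uint64_t steal_clock_ns)
{
	uint64_t steal = steal_clock_ns - prev_steal_time;	/* not yet accounted */
	uint64_t ticks = steal / TICK_NSEC;			/* whole ticks only */

	prev_steal_time += ticks * TICK_NSEC;	/* sub-tick remainder carries over */
	return ticks;
}

int main(void)
{
	/* made-up cumulative readings of a paravirt steal clock, in ns */
	uint64_t readings[] = { 1500000, 1900000, 4100000 };

	for (unsigned int i = 0; i < sizeof(readings) / sizeof(readings[0]); i++)
		printf("clock %llu ns -> account %llu tick(s)\n",
		       (unsigned long long)readings[i],
		       (unsigned long long)account_steal(readings[i]));
	return 0;
}

With these numbers the three calls account 1, 0 and 3 ticks: the 500,000 ns left over after the first reading is not lost, it is accounted once another whole tick's worth of steal has accumulated, which is why the patch keeps prev_steal_time and prev_steal_time_rq in nanoseconds rather than ticks.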