Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--	kernel/sched/core.c	230
1 file changed, 103 insertions(+), 127 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5eab11d4b747..f0f831e8a345 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -119,7 +119,9 @@ void update_rq_clock(struct rq *rq)
 {
 	s64 delta;
 
-	if (rq->skip_clock_update > 0)
+	lockdep_assert_held(&rq->lock);
+
+	if (rq->clock_skip_update & RQCF_ACT_SKIP)
 		return;
 
 	delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
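This hunk replaces the old rq->skip_clock_update integer with a two-bit clock_skip_update mask. The flag definitions and the rq_clock_skip_update() helper used later in this patch live in kernel/sched/sched.h and are not part of this diff; a minimal sketch of what they presumably look like (names taken from the hunks below, exact definitions assumed):

	/* Assumed companion definitions from kernel/sched/sched.h (not in this diff). */
	#define RQCF_REQ_SKIP	0x01	/* skip requested; "<<= 1" in __schedule() promotes it to ACT */
	#define RQCF_ACT_SKIP	0x02	/* skip active for this pass; update_rq_clock() returns early */

	static inline void rq_clock_skip_update(struct rq *rq, bool skip)
	{
		lockdep_assert_held(&rq->lock);
		if (skip)
			rq->clock_skip_update |= RQCF_REQ_SKIP;
		else
			rq->clock_skip_update &= ~RQCF_REQ_SKIP;
	}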
@@ -305,66 +307,6 @@ __read_mostly int scheduler_running;
 int sysctl_sched_rt_runtime = 950000;
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	lockdep_assert_held(&p->pi_lock);
-
-	for (;;) {
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-	__acquires(p->pi_lock)
-	__acquires(rq->lock)
-{
-	struct rq *rq;
-
-	for (;;) {
-		raw_spin_lock_irqsave(&p->pi_lock, *flags);
-		rq = task_rq(p);
-		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-			return rq;
-		raw_spin_unlock(&rq->lock);
-		raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-		while (unlikely(task_on_rq_migrating(p)))
-			cpu_relax();
-	}
-}
-
-static void __task_rq_unlock(struct rq *rq)
-	__releases(rq->lock)
-{
-	raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-	__releases(rq->lock)
-	__releases(p->pi_lock)
-{
-	raw_spin_unlock(&rq->lock);
-	raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
-/*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
 static struct rq *this_rq_lock(void)
@@ -490,6 +432,11 @@ static __init void init_hrtick(void)
  */
 void hrtick_start(struct rq *rq, u64 delay)
 {
+	/*
+	 * Don't schedule slices shorter than 10000ns, that just
+	 * doesn't make sense. Rely on vruntime for fairness.
+	 */
+	delay = max_t(u64, delay, 10000LL);
 	__hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
 			HRTIMER_MODE_REL_PINNED, 0);
 }
@@ -1046,7 +993,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq->skip_clock_update = 1;
+		rq_clock_skip_update(rq, true);
 }
 
 #ifdef CONFIG_SMP
1052#ifdef CONFIG_SMP 999#ifdef CONFIG_SMP
@@ -1082,7 +1029,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
-		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
 	}
 
 	__set_task_cpu(p, new_cpu);
@@ -1836,6 +1783,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 	p->se.prev_sum_exec_runtime = 0;
 	p->se.nr_migrations = 0;
 	p->se.vruntime = 0;
+#ifdef CONFIG_SMP
+	p->se.avg.decay_count = 0;
+#endif
 	INIT_LIST_HEAD(&p->se.group_node);
 
 #ifdef CONFIG_SCHEDSTATS
@@ -2755,6 +2705,10 @@ again:
  * - explicit schedule() call
  * - return from syscall or exception to user-space
  * - return from interrupt-handler to user-space
+ *
+ * WARNING: all callers must re-check need_resched() afterward and reschedule
+ * accordingly in case an event triggered the need for rescheduling (such as
+ * an interrupt waking up a task) while preemption was disabled in __schedule().
  */
 static void __sched __schedule(void)
 {
@@ -2763,7 +2717,6 @@ static void __sched __schedule(void)
 	struct rq *rq;
 	int cpu;
 
-need_resched:
 	preempt_disable();
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
@@ -2783,6 +2736,8 @@ need_resched:
 	smp_mb__before_spinlock();
 	raw_spin_lock_irq(&rq->lock);
 
+	rq->clock_skip_update <<= 1; /* promote REQ to ACT */
+
 	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely(signal_pending_state(prev->state, prev))) {
@@ -2807,13 +2762,13 @@ need_resched:
 		switch_count = &prev->nvcsw;
 	}
 
-	if (task_on_rq_queued(prev) || rq->skip_clock_update < 0)
+	if (task_on_rq_queued(prev))
 		update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
-	rq->skip_clock_update = 0;
+	rq->clock_skip_update = 0;
 
 	if (likely(prev != next)) {
 		rq->nr_switches++;
@@ -2828,8 +2783,6 @@ need_resched:
 	post_schedule(rq);
 
 	sched_preempt_enable_no_resched();
-	if (need_resched())
-		goto need_resched;
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2849,7 +2802,9 @@ asmlinkage __visible void __sched schedule(void)
 	struct task_struct *tsk = current;
 
 	sched_submit_work(tsk);
-	__schedule();
+	do {
+		__schedule();
+	} while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
 
@@ -2884,6 +2839,21 @@ void __sched schedule_preempt_disabled(void)
 	preempt_disable();
 }
 
+static void __sched notrace preempt_schedule_common(void)
+{
+	do {
+		__preempt_count_add(PREEMPT_ACTIVE);
+		__schedule();
+		__preempt_count_sub(PREEMPT_ACTIVE);
+
+		/*
+		 * Check again in case we missed a preemption opportunity
+		 * between schedule and now.
+		 */
+		barrier();
+	} while (need_resched());
+}
+
 #ifdef CONFIG_PREEMPT
 /*
  * this is the entry point to schedule() from in-kernel preemption
@@ -2899,17 +2869,7 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 	if (likely(!preemptible()))
 		return;
 
-	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
-		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
-	} while (need_resched());
+	preempt_schedule_common();
 }
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
@@ -3405,6 +3365,20 @@ static bool check_same_owner(struct task_struct *p)
 	return match;
 }
 
+static bool dl_param_changed(struct task_struct *p,
+		const struct sched_attr *attr)
+{
+	struct sched_dl_entity *dl_se = &p->dl;
+
+	if (dl_se->dl_runtime != attr->sched_runtime ||
+	    dl_se->dl_deadline != attr->sched_deadline ||
+	    dl_se->dl_period != attr->sched_period ||
+	    dl_se->flags != attr->sched_flags)
+		return true;
+
+	return false;
+}
+
 static int __sched_setscheduler(struct task_struct *p,
 				const struct sched_attr *attr,
 				bool user)
@@ -3533,7 +3507,7 @@ recheck:
 			goto change;
 		if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
 			goto change;
-		if (dl_policy(policy))
+		if (dl_policy(policy) && dl_param_changed(p, attr))
 			goto change;
 
 		p->sched_reset_on_fork = reset_on_fork;
@@ -4225,17 +4199,10 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-static void __cond_resched(void)
-{
-	__preempt_count_add(PREEMPT_ACTIVE);
-	__schedule();
-	__preempt_count_sub(PREEMPT_ACTIVE);
-}
-
 int __sched _cond_resched(void)
 {
 	if (should_resched()) {
-		__cond_resched();
+		preempt_schedule_common();
 		return 1;
 	}
 	return 0;
@@ -4260,7 +4227,7 @@ int __cond_resched_lock(spinlock_t *lock)
 	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
 		if (resched)
-			__cond_resched();
+			preempt_schedule_common();
 		else
 			cpu_relax();
 		ret = 1;
@@ -4276,7 +4243,7 @@ int __sched __cond_resched_softirq(void)
 
 	if (should_resched()) {
 		local_bh_enable();
-		__cond_resched();
+		preempt_schedule_common();
 		local_bh_disable();
 		return 1;
 	}
@@ -4391,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-	struct rq *rq = raw_rq();
-
-	delayacct_blkio_start();
-	atomic_inc(&rq->nr_iowait);
-	blk_flush_plug(current);
-	current->in_iowait = 1;
-	schedule();
-	current->in_iowait = 0;
-	atomic_dec(&rq->nr_iowait);
-	delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-	struct rq *rq = raw_rq();
+	int old_iowait = current->in_iowait;
+	struct rq *rq;
 	long ret;
 
+	current->in_iowait = 1;
+	if (old_iowait)
+		blk_schedule_flush_plug(current);
+	else
+		blk_flush_plug(current);
+
 	delayacct_blkio_start();
+	rq = raw_rq();
 	atomic_inc(&rq->nr_iowait);
-	blk_flush_plug(current);
-	current->in_iowait = 1;
 	ret = schedule_timeout(timeout);
-	current->in_iowait = 0;
+	current->in_iowait = old_iowait;
 	atomic_dec(&rq->nr_iowait);
 	delayacct_blkio_end();
+
 	return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
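io_schedule() disappears from core.c in the hunk above, and io_schedule_timeout() picks up the EXPORT_SYMBOL. Presumably io_schedule() survives as a thin wrapper around io_schedule_timeout() outside this file (e.g. in include/linux/sched.h); a hedged sketch of that assumed wrapper:

	/* Assumed replacement wrapper, not part of this diff. */
	static inline void io_schedule(void)
	{
		io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
	}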
@@ -4531,9 +4491,10 @@ void sched_show_task(struct task_struct *p)
 {
 	unsigned long free = 0;
 	int ppid;
-	unsigned state;
+	unsigned long state = p->state;
 
-	state = p->state ? __ffs(p->state) + 1 : 0;
+	if (state)
+		state = __ffs(state) + 1;
 	printk(KERN_INFO "%-15.15s %c", p->comm,
 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
@@ -4766,7 +4727,7 @@ static struct rq *move_queued_task(struct task_struct *p, int new_cpu)
 
 void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 {
-	if (p->sched_class && p->sched_class->set_cpus_allowed)
+	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 
 	cpumask_copy(&p->cpus_allowed, new_mask);
@@ -5434,9 +5395,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 				  struct cpumask *groupmask)
 {
 	struct sched_group *group = sd->groups;
-	char str[256];
 
-	cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
 	cpumask_clear(groupmask);
 
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -5449,7 +5408,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 		return -1;
 	}
 
-	printk(KERN_CONT "span %s level %s\n", str, sd->name);
+	printk(KERN_CONT "span %*pbl level %s\n",
+	       cpumask_pr_args(sched_domain_span(sd)), sd->name);
 
 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
@@ -5494,9 +5454,8 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-		cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-
-		printk(KERN_CONT " %s", str);
+		printk(KERN_CONT " %*pbl",
+		       cpumask_pr_args(sched_group_cpus(group)));
 		if (group->sgc->capacity != SCHED_CAPACITY_SCALE) {
 			printk(KERN_CONT " (cpu_capacity = %d)",
 				group->sgc->capacity);
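Both debug hunks above drop the fixed char str[256] buffer and cpulist_scnprintf() in favour of printing the cpumask directly with the %*pbl bitmap-list format specifier. cpumask_pr_args() is assumed to come from include/linux/cpumask.h (it is not part of this diff) and to expand to the width/pointer pair that %*pbl consumes, roughly:

	/* Assumed helper from include/linux/cpumask.h (not in this diff). */
	#define cpumask_pr_args(maskp)	nr_cpu_ids, cpumask_bits(maskp)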
@@ -7276,6 +7235,11 @@ void __init sched_init(void)
 	enter_lazy_tlb(&init_mm, current);
 
 	/*
+	 * During early bootup we pretend to be a normal task:
+	 */
+	current->sched_class = &fair_sched_class;
+
+	/*
 	 * Make us the idle thread. Technically, schedule() should not be
 	 * called from this thread, however somewhere below it might be,
 	 * but because we are the idle thread, we just pick up running again
@@ -7285,11 +7249,6 @@ void __init sched_init(void)
 
 	calc_load_update = jiffies + LOAD_FREQ;
 
-	/*
-	 * During early bootup we pretend to be a normal task:
-	 */
-	current->sched_class = &fair_sched_class;
-
 #ifdef CONFIG_SMP
 	zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT);
 	/* May be allocated at isolcpus cmdline parse time */
@@ -7350,6 +7309,9 @@ void ___might_sleep(const char *file, int line, int preempt_offset)
 			in_atomic(), irqs_disabled(),
 			current->pid, current->comm);
 
+	if (task_stack_end_corrupted(current))
+		printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
+
 	debug_show_held_locks(current);
 	if (irqs_disabled())
 		print_irqtrace_events(current);
@@ -7613,6 +7575,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
 	struct task_struct *g, *p;
 
+	/*
+	 * Autogroups do not have RT tasks; see autogroup_create().
+	 */
+	if (task_group_is_autogroup(tg))
+		return 0;
+
 	for_each_process_thread(g, p) {
 		if (rt_task(p) && task_group(p) == tg)
 			return 1;
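The early return above relies on task_group_is_autogroup(), which is defined outside this file (kernel/sched/auto_group.h) and not shown in this diff; a sketch of the assumed helper:

	/* Assumed helper from kernel/sched/auto_group.h (not in this diff). */
	static inline bool task_group_is_autogroup(struct task_group *tg)
	{
		return !!tg->autogroup;
	}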
@@ -7705,6 +7673,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
 	int i, err = 0;
 
+	/*
+	 * Disallowing the root group RT runtime is BAD, it would disallow the
+	 * kernel creating (and or operating) RT threads.
+	 */
+	if (tg == &root_task_group && rt_runtime == 0)
+		return -EINVAL;
+
+	/* No period doesn't make any sense. */
+	if (rt_period == 0)
+		return -EINVAL;
+
 	mutex_lock(&rt_constraints_mutex);
 	read_lock(&tasklist_lock);
 	err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7761,9 +7740,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
 	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
 	rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-	if (rt_period == 0)
-		return -EINVAL;
-
 	return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 