Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 113
1 file changed, 30 insertions, 83 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 13049aac05a6..f0f831e8a345 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -307,66 +307,6 @@ __read_mostly int scheduler_running;
 int sysctl_sched_rt_runtime = 950000;
 
 /*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-        __acquires(rq->lock)
-{
-        struct rq *rq;
-
-        lockdep_assert_held(&p->pi_lock);
-
-        for (;;) {
-                rq = task_rq(p);
-                raw_spin_lock(&rq->lock);
-                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                        return rq;
-                raw_spin_unlock(&rq->lock);
-
-                while (unlikely(task_on_rq_migrating(p)))
-                        cpu_relax();
-        }
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-        __acquires(p->pi_lock)
-        __acquires(rq->lock)
-{
-        struct rq *rq;
-
-        for (;;) {
-                raw_spin_lock_irqsave(&p->pi_lock, *flags);
-                rq = task_rq(p);
-                raw_spin_lock(&rq->lock);
-                if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                        return rq;
-                raw_spin_unlock(&rq->lock);
-                raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-                while (unlikely(task_on_rq_migrating(p)))
-                        cpu_relax();
-        }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-        __releases(rq->lock)
-{
-        raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-        __releases(rq->lock)
-        __releases(p->pi_lock)
-{
-        raw_spin_unlock(&rq->lock);
-        raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
-/*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
 static struct rq *this_rq_lock(void)
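The four locking helpers removed above are not simply deleted; this diffstat is limited to kernel/sched/core.c, so their destination is not visible here, but presumably they move to a shared scheduler header (e.g. kernel/sched/sched.h) so other scheduler translation units can take the pi_lock + rq->lock pair. As a reminder of the contract they implement, here is a minimal caller sketch using the signatures shown above; the function name is illustrative only:

static void example_update_task(struct task_struct *p)
{
        unsigned long flags;
        struct rq *rq;

        /*
         * task_rq_lock() takes p->pi_lock, then the runqueue lock, and
         * retries until the task is neither migrating nor switching
         * runqueues, so the task/rq pairing below stays stable.
         */
        rq = task_rq_lock(p, &flags);

        /* ... inspect or modify p's scheduling state against rq ... */

        task_rq_unlock(rq, p, &flags);
}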
@@ -2899,7 +2839,7 @@ void __sched schedule_preempt_disabled(void)
         preempt_disable();
 }
 
-static void preempt_schedule_common(void)
+static void __sched notrace preempt_schedule_common(void)
 {
         do {
                 __preempt_count_add(PREEMPT_ACTIVE);
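A note on the notrace annotation added above (my reading; the hunk itself does not explain it): now that this retry loop is an out-of-line function, the function tracer can attach to it, and a traced entry that itself ends up scheduling would re-enter preempt_schedule_common() recursively. Marking it __sched notrace keeps it invisible to ftrace, the same treatment its presumed caller already gets:

/* Presumed caller, shown for context; not part of this hunk. */
asmlinkage __visible void __sched notrace preempt_schedule(void)
{
        /*
         * If there is a non-zero preempt_count or interrupts are
         * disabled, we do not want to preempt the current task.
         */
        if (likely(!preemptible()))
                return;

        preempt_schedule_common();
}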
@@ -4418,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-        struct rq *rq = raw_rq();
-
-        delayacct_blkio_start();
-        atomic_inc(&rq->nr_iowait);
-        blk_flush_plug(current);
-        current->in_iowait = 1;
-        schedule();
-        current->in_iowait = 0;
-        atomic_dec(&rq->nr_iowait);
-        delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-        struct rq *rq = raw_rq();
+        int old_iowait = current->in_iowait;
+        struct rq *rq;
         long ret;
 
+        current->in_iowait = 1;
+        if (old_iowait)
+                blk_schedule_flush_plug(current);
+        else
+                blk_flush_plug(current);
+
         delayacct_blkio_start();
+        rq = raw_rq();
         atomic_inc(&rq->nr_iowait);
-        blk_flush_plug(current);
-        current->in_iowait = 1;
         ret = schedule_timeout(timeout);
-        current->in_iowait = 0;
+        current->in_iowait = old_iowait;
         atomic_dec(&rq->nr_iowait);
         delayacct_blkio_end();
+
         return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
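Only io_schedule_timeout() is exported from this file now. Because the diffstat is limited to kernel/sched/core.c, the replacement for the deleted io_schedule() is not shown; presumably it survives as a thin wrapper along these lines, so callers see no API change:

/* Assumed wrapper standing in for the io_schedule() removed above. */
static inline void io_schedule(void)
{
        io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}

The reworked io_schedule_timeout() also saves and restores current->in_iowait and only samples the runqueue after the plug flush; when the task is already in iowait the flush is deferred via blk_schedule_flush_plug(), which reads as a guard against the flush path re-entering this function.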
@@ -7642,6 +7575,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
         struct task_struct *g, *p;
 
+        /*
+         * Autogroups do not have RT tasks; see autogroup_create().
+         */
+        if (task_group_is_autogroup(tg))
+                return 0;
+
         for_each_process_thread(g, p) {
                 if (rt_task(p) && task_group(p) == tg)
                         return 1;
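The early return above matters because tg_has_rt_tasks() feeds the RT-bandwidth validation: an RT task whose task_group() resolves to an autogroup would otherwise count against a group that never owns RT bandwidth, and bandwidth updates could be refused for no real reason. A hedged sketch of the presumed consumer check (the function name here is illustrative):

/* Sketch only: how a validation path would use tg_has_rt_tasks(). */
static int tg_check_rt_starvation(struct task_group *tg, u64 runtime)
{
        /*
         * Refuse to drop a group's runtime to zero while it still has
         * RT tasks; autogroups now short-circuit to "no RT tasks".
         */
        if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
                return -EBUSY;

        return 0;
}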
@@ -7734,6 +7673,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
         int i, err = 0;
 
+        /*
+         * Disallowing the root group RT runtime is BAD, it would disallow the
+         * kernel creating (and or operating) RT threads.
+         */
+        if (tg == &root_task_group && rt_runtime == 0)
+                return -EINVAL;
+
+        /* No period doesn't make any sense. */
+        if (rt_period == 0)
+                return -EINVAL;
+
         mutex_lock(&rt_constraints_mutex);
         read_lock(&tasklist_lock);
         err = __rt_schedulable(tg, rt_period, rt_runtime);
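Taken together with the hunk just below, both sanity checks now live in tg_set_rt_bandwidth(), the common entry point: sched_group_set_rt_period() drops its private rt_period == 0 test, and the runtime-setting path (which previously had no such test) is covered as well, before a zero period can reach the bandwidth ratio arithmetic. Illustrative expectations only, with example values:

/* Example-only checks; some_task_group is a stand-in, not a real symbol. */
static void rt_bandwidth_examples(struct task_group *some_task_group)
{
        /* A zero period is rejected up front for any group. */
        WARN_ON(tg_set_rt_bandwidth(some_task_group, 0,
                                    950000 * NSEC_PER_USEC) != -EINVAL);

        /* Removing all RT runtime from the root group is rejected too. */
        WARN_ON(tg_set_rt_bandwidth(&root_task_group,
                                    1000000ULL * NSEC_PER_USEC, 0) != -EINVAL);
}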
@@ -7790,9 +7740,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
         rt_period = (u64)rt_period_us * NSEC_PER_USEC;
         rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-        if (rt_period == 0)
-                return -EINVAL;
-
         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 