 include/linux/sched.h   |  9 +++++++--
 include/linux/wait.h    |  4 ++--
 kernel/sched.c          | 30 ++++++++++++++++--------------
 kernel/sched_fair.c     |  6 ++++--
 kernel/sched_idletask.c |  4 ++--
 kernel/sched_rt.c       |  4 ++--
 6 files changed, 33 insertions(+), 24 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 5c116f03d74c..3b07168b6f03 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1024,6 +1024,11 @@ struct uts_namespace;
 struct rq;
 struct sched_domain;
 
+/*
+ * wake flags
+ */
+#define WF_SYNC		0x01		/* waker goes to sleep after wakup */
+
 struct sched_class {
 	const struct sched_class *next;
 
@@ -1031,13 +1036,13 @@ struct sched_class {
 	void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
 	void (*yield_task) (struct rq *rq);
 
-	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
+	void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int flags);
 
 	struct task_struct * (*pick_next_task) (struct rq *rq);
 	void (*put_prev_task) (struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
-	int (*select_task_rq)(struct task_struct *p, int sd_flag, int sync);
+	int (*select_task_rq)(struct task_struct *p, int sd_flag, int flags);
 
 	unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
 			struct rq *busiest, unsigned long max_load_move,
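
Aside (not part of the patch): the hunks above turn the dedicated "sync" argument of the sched_class hooks into a generic flag word and introduce WF_SYNC as its first bit. The standalone program below is only a sketch of why a bitmask is preferable; WF_EXAMPLE is a purely hypothetical second bit, used to show that further wake-up hints could be added later without touching any of these signatures again.

/*
 * Standalone illustration (not kernel code) of boolean-argument vs.
 * flag-word calling conventions. WF_SYNC mirrors the value defined in
 * include/linux/sched.h above; WF_EXAMPLE is hypothetical.
 */
#include <stdio.h>

#define WF_SYNC		0x01	/* waker goes to sleep after wakeup */
#define WF_EXAMPLE	0x02	/* hypothetical future wake hint */

/* Old style: one dedicated int per hint. */
static void check_preempt_old(int sync)
{
	if (sync)
		printf("old: sync wakeup\n");
}

/* New style: one int carries all hints; callees test individual bits. */
static void check_preempt_new(int wake_flags)
{
	if (wake_flags & WF_SYNC)
		printf("new: sync wakeup\n");
	if (wake_flags & WF_EXAMPLE)
		printf("new: some other hint\n");
}

int main(void)
{
	check_preempt_old(1);
	check_preempt_new(WF_SYNC | WF_EXAMPLE);
	return 0;
}
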
diff --git a/include/linux/wait.h b/include/linux/wait.h
index cf3c2f5dba51..a48e16b77d5e 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -26,8 +26,8 @@
 #include <asm/current.h>
 
 typedef struct __wait_queue wait_queue_t;
-typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int sync, void *key);
-int default_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
+typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
+int default_wake_function(wait_queue_t *wait, unsigned mode, int flags, void *key);
 
 struct __wait_queue {
 	unsigned int flags;
diff --git a/kernel/sched.c b/kernel/sched.c
index e8e603bf8761..4da335cec8ee 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -636,9 +636,10 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
+static inline
+void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 {
-	rq->curr->sched_class->check_preempt_curr(rq, p, sync);
+	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 }
 
 static inline int cpu_of(struct rq *rq)
@@ -2318,14 +2319,15 @@ void task_oncpu_function_call(struct task_struct *p,
  *
  * returns failure only if the task is already active.
  */
-static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+static int try_to_wake_up(struct task_struct *p, unsigned int state,
+			  int wake_flags)
 {
 	int cpu, orig_cpu, this_cpu, success = 0;
 	unsigned long flags;
 	struct rq *rq;
 
 	if (!sched_feat(SYNC_WAKEUPS))
-		sync = 0;
+		wake_flags &= ~WF_SYNC;
 
 	this_cpu = get_cpu();
 
@@ -2352,7 +2354,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	p->state = TASK_WAKING;
 	task_rq_unlock(rq, &flags);
 
-	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, sync);
+	cpu = p->sched_class->select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
 	if (cpu != orig_cpu)
 		set_task_cpu(p, cpu);
 
@@ -2378,7 +2380,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 out_activate:
 #endif /* CONFIG_SMP */
 	schedstat_inc(p, se.nr_wakeups);
-	if (sync)
+	if (wake_flags & WF_SYNC)
 		schedstat_inc(p, se.nr_wakeups_sync);
 	if (orig_cpu != cpu)
 		schedstat_inc(p, se.nr_wakeups_migrate);
@@ -2407,7 +2409,7 @@ out_activate:
 
 out_running:
 	trace_sched_wakeup(rq, p, success);
-	check_preempt_curr(rq, p, sync);
+	check_preempt_curr(rq, p, wake_flags);
 
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
@@ -5562,10 +5564,10 @@ asmlinkage void __sched preempt_schedule_irq(void)
 
 #endif /* CONFIG_PREEMPT */
 
-int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
+int default_wake_function(wait_queue_t *curr, unsigned mode, int flags,
 			  void *key)
 {
-	return try_to_wake_up(curr->private, mode, sync);
+	return try_to_wake_up(curr->private, mode, flags);
 }
 EXPORT_SYMBOL(default_wake_function);
 
@@ -5579,14 +5581,14 @@ EXPORT_SYMBOL(default_wake_function);
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			int nr_exclusive, int sync, void *key)
+			int nr_exclusive, int flags, void *key)
 {
 	wait_queue_t *curr, *next;
 
 	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
 		unsigned flags = curr->flags;
 
-		if (curr->func(curr, mode, sync, key) &&
+		if (curr->func(curr, mode, flags, key) &&
 				(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
 			break;
 	}
@@ -5647,16 +5649,16 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
 		int nr_exclusive, void *key)
 {
 	unsigned long flags;
-	int sync = 1;
+	int wake_flags = WF_SYNC;
 
 	if (unlikely(!q))
 		return;
 
 	if (unlikely(!nr_exclusive))
-		sync = 0;
+		wake_flags = 0;
 
 	spin_lock_irqsave(&q->lock, flags);
-	__wake_up_common(q, mode, nr_exclusive, sync, key);
+	__wake_up_common(q, mode, nr_exclusive, wake_flags, key);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL_GPL(__wake_up_sync_key);
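
Aside (not part of the patch): with the flag word, try_to_wake_up() can honour the SYNC_WAKEUPS feature switch by clearing just the WF_SYNC bit instead of zeroing its whole argument, so any other hints the word might carry would survive. The standalone sketch below demonstrates that masking behaviour; the feature toggle is a plain variable standing in for sched_feat(), and WF_OTHER is a hypothetical extra bit.

/*
 * Standalone sketch (not kernel code) of the masking pattern used in
 * try_to_wake_up(): disabling a feature clears only the matching bit.
 */
#include <assert.h>

#define WF_SYNC		0x01	/* waker goes to sleep after wakeup */
#define WF_OTHER	0x02	/* hypothetical extra hint */

static int sync_wakeups_enabled;	/* stand-in for sched_feat(SYNC_WAKEUPS) */

static int filter_wake_flags(int wake_flags)
{
	if (!sync_wakeups_enabled)
		wake_flags &= ~WF_SYNC;	/* drop the sync hint only */
	return wake_flags;
}

int main(void)
{
	/* Sync wakeups disabled: other hints must still get through. */
	assert(filter_wake_flags(WF_SYNC | WF_OTHER) == WF_OTHER);

	/* Sync wakeups enabled: the word passes through unchanged. */
	sync_wakeups_enabled = 1;
	assert(filter_wake_flags(WF_SYNC | WF_OTHER) == (WF_SYNC | WF_OTHER));
	return 0;
}
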
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b554e63c521a..007958e3c93a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1331,13 +1331,14 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  *
  * preempt must be disabled.
  */
-static int select_task_rq_fair(struct task_struct *p, int sd_flag, int sync)
+static int select_task_rq_fair(struct task_struct *p, int sd_flag, int flags)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	int cpu = smp_processor_id();
 	int prev_cpu = task_cpu(p);
 	int new_cpu = cpu;
 	int want_affine = 0;
+	int sync = flags & WF_SYNC;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
 		if (sched_feat(AFFINE_WAKEUPS))
@@ -1548,11 +1549,12 @@ static void set_next_buddy(struct sched_entity *se)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
+	int sync = flags & WF_SYNC;
 
 	update_curr(cfs_rq);
 
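
Aside (not part of the patch): both fair-class hunks change only the parameter; the function bodies stay untouched because a local sync value is derived from the flag word on entry. The standalone sketch below shows that small adapter pattern with simplified stand-in code.

/*
 * Standalone sketch (not kernel code): derive a local boolean-style
 * "sync" from the flag word so existing code can keep using it.
 */
#include <stdio.h>

#define WF_SYNC	0x01

static void check_preempt_wakeup_sketch(int flags)
{
	int sync = flags & WF_SYNC;	/* nonzero iff the sync hint is set */

	if (sync)
		printf("treating the wakeup as synchronous\n");
	else
		printf("regular wakeup\n");
}

int main(void)
{
	check_preempt_wakeup_sketch(WF_SYNC);
	check_preempt_wakeup_sketch(0);
	return 0;
}
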
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 9ff7697e5dc4..a8b448af004b 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -6,7 +6,7 @@
  */
 
 #ifdef CONFIG_SMP
-static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
+static int select_task_rq_idle(struct task_struct *p, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
@@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sd_flag, int sync)
 /*
  * Idle tasks are unconditionally rescheduled:
  */
-static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 {
 	resched_task(rq->idle);
 }
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 97c53f3f51a7..13de7126a6ab 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -938,7 +938,7 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
-static int select_task_rq_rt(struct task_struct *p, int sd_flag, int sync)
+static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 {
 	struct rq *rq = task_rq(p);
 
@@ -1002,7 +1002,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 /*
  * Preempt the current task with a newly woken task if needed:
  */
-static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
+static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	if (p->prio < rq->curr->prio) {
 		resched_task(rq->curr);
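
Aside (not part of the patch): the idle and RT classes above, like the fair class, now expose check_preempt_curr() and select_task_rq() with the flags-taking signature, and the check_preempt_curr() wrapper in kernel/sched.c reaches whichever implementation applies through the sched_class function-pointer table. The standalone sketch below mirrors that dispatch with simplified stand-in types; it is not the kernel's actual layout.

/*
 * Standalone sketch (not kernel code) of per-class dispatch: each
 * "class" supplies a callback with the flags-taking signature and a
 * wrapper forwards the flag word to it.
 */
#include <stdio.h>

#define WF_SYNC	0x01

struct fake_task;

struct fake_sched_class {
	void (*check_preempt_curr)(struct fake_task *p, int flags);
};

struct fake_task {
	const struct fake_sched_class *sched_class;
	const char *name;
};

static void check_preempt_fair(struct fake_task *p, int flags)
{
	printf("fair: %s, sync=%d\n", p->name, !!(flags & WF_SYNC));
}

static void check_preempt_idle(struct fake_task *p, int flags)
{
	(void)flags;			/* the idle class ignores the hints */
	printf("idle: %s -> always reschedule\n", p->name);
}

static const struct fake_sched_class fair_class = { check_preempt_fair };
static const struct fake_sched_class idle_class = { check_preempt_idle };

/* Simplified analogue of the wrapper in kernel/sched.c. */
static void check_preempt_curr(struct fake_task *curr, int flags)
{
	curr->sched_class->check_preempt_curr(curr, flags);
}

int main(void)
{
	struct fake_task woken = { &fair_class, "woken task" };
	struct fake_task idle  = { &idle_class, "idle task" };

	check_preempt_curr(&woken, WF_SYNC);
	check_preempt_curr(&idle, 0);
	return 0;
}
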