author      Ingo Molnar <mingo@elte.hu>    2007-11-09 16:39:39 -0500
committer   Ingo Molnar <mingo@elte.hu>    2007-11-09 16:39:39 -0500
commit      3e3e13f399ac8060a20d14d210a28dc02dda372e
tree        b560a614e926f5f90e4096b6d4743b1b5fdfccb2
parent      52d3da1ad4f442cec877fbeb83902707b56da0cf
sched: remove PREEMPT_RESTRICT
remove PREEMPT_RESTRICT. (this is a separate commit so that any
regression related to the removal itself is bisectable)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--   include/linux/sched.h    1
-rw-r--r--   kernel/sched.c           4
-rw-r--r--   kernel/sched_fair.c     10
3 files changed, 3 insertions, 12 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 951759e30c09..93fd30d6dac4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -863,7 +863,6 @@ struct sched_entity {
         struct load_weight      load;           /* for load-balancing */
         struct rb_node          run_node;
         unsigned int            on_rq;
-        int                     peer_preempt;
 
         u64                     exec_start;
         u64                     sum_exec_runtime;
diff --git a/kernel/sched.c b/kernel/sched.c
index 4b23dfb4c80f..2a107e4ad5ed 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -460,7 +460,6 @@ enum {
         SCHED_FEAT_TREE_AVG             = 4,
         SCHED_FEAT_APPROX_AVG           = 8,
         SCHED_FEAT_WAKEUP_PREEMPT       = 16,
-        SCHED_FEAT_PREEMPT_RESTRICT     = 32,
 };
 
 const_debug unsigned int sysctl_sched_features =
@@ -468,8 +467,7 @@ const_debug unsigned int sysctl_sched_features =
                 SCHED_FEAT_START_DEBIT          * 1 |
                 SCHED_FEAT_TREE_AVG             * 0 |
                 SCHED_FEAT_APPROX_AVG           * 0 |
-                SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
-                SCHED_FEAT_PREEMPT_RESTRICT     * 0;
+                SCHED_FEAT_WAKEUP_PREEMPT       * 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
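For context on the feature bits touched above: each SCHED_FEAT_* constant is a power of two, the sysctl_sched_features initializer multiplies every constant by 0 or 1 to switch it on or off, and the sched_feat() macro tests a single bit, so deleting SCHED_FEAT_PREEMPT_RESTRICT does not renumber the remaining features. The following is a minimal standalone sketch of that mechanism in plain userspace C, not kernel code; it reproduces only the three feature values visible in the hunk above.

/*
 * Standalone sketch, not kernel code: mirrors the scheduler feature
 * bitmask mechanism edited above. Only the feature values visible in
 * the hunk (TREE_AVG, APPROX_AVG, WAKEUP_PREEMPT) are included; the
 * real enum in kernel/sched.c has more entries.
 */
#include <stdio.h>

enum {
        SCHED_FEAT_TREE_AVG             = 4,
        SCHED_FEAT_APPROX_AVG           = 8,
        SCHED_FEAT_WAKEUP_PREEMPT       = 16,
};

/* Multiplying each constant by 0 or 1 turns the initializer into an
 * on/off table; the result is an ordinary bitmask. */
static unsigned int sysctl_sched_features =
                SCHED_FEAT_TREE_AVG             * 0 |
                SCHED_FEAT_APPROX_AVG           * 0 |
                SCHED_FEAT_WAKEUP_PREEMPT       * 1;

#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

int main(void)
{
        printf("WAKEUP_PREEMPT: %d\n", !!sched_feat(WAKEUP_PREEMPT));
        printf("TREE_AVG:       %d\n", !!sched_feat(TREE_AVG));
        return 0;
}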
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 7264814ba62a..fbcb426029d0 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -546,7 +546,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 
         update_stats_dequeue(cfs_rq, se);
         if (sleep) {
-                se->peer_preempt = 0;
 #ifdef CONFIG_SCHEDSTATS
                 if (entity_is_task(se)) {
                         struct task_struct *tsk = task_of(se);
@@ -574,10 +573,8 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
         ideal_runtime = sched_slice(cfs_rq, curr);
         delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-        if (delta_exec > ideal_runtime ||
-                        (sched_feat(PREEMPT_RESTRICT) && curr->peer_preempt))
+        if (delta_exec > ideal_runtime)
                 resched_task(rq_of(cfs_rq)->curr);
-        curr->peer_preempt = 0;
 }
 
 static void
@@ -867,9 +864,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
                 gran = calc_delta_fair(gran, &se->load);
 
         if (delta > gran) {
-                int now = !sched_feat(PREEMPT_RESTRICT);
-
-                if (now || p->prio < curr->prio || !se->peer_preempt++)
+                if (p->prio < curr->prio)
                         resched_task(curr);
         }
 }
@@ -1083,7 +1078,6 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
                 swap(curr->vruntime, se->vruntime);
         }
 
-        se->peer_preempt = 0;
         enqueue_task_fair(rq, p, 0);
         resched_task(rq->curr);
 }
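Taken together, the sched_fair.c hunks leave two simpler preemption checks: the tick path reschedules purely on slice overrun, and the wakeup path (as it reads in this tree) reschedules when the vruntime gap exceeds the wakeup granularity and the waking task has higher priority. The sketch below restates those two conditions as standalone userspace C for illustration only; the helper names (tick_wants_resched, wakeup_wants_resched) and bare integer parameters are hypothetical stand-ins for the real cfs_rq/sched_entity plumbing, not kernel interfaces.

/*
 * Standalone sketch, not kernel code: the two preemption decisions as
 * they read after this patch. Helper names and parameter types are
 * hypothetical stand-ins for the scheduler structures.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Tick path (check_preempt_tick): reschedule once the running entity
 * has used more than its ideal slice; the PREEMPT_RESTRICT/peer_preempt
 * escape hatch is gone. */
static bool tick_wants_resched(uint64_t delta_exec, uint64_t ideal_runtime)
{
        return delta_exec > ideal_runtime;
}

/* Wakeup path (check_preempt_wakeup, as in this tree): reschedule when
 * the vruntime difference exceeds the wakeup granularity and the waking
 * task has the higher (numerically lower) priority. */
static bool wakeup_wants_resched(int64_t delta, int64_t gran,
                                 int waking_prio, int curr_prio)
{
        return delta > gran && waking_prio < curr_prio;
}

int main(void)
{
        printf("tick preempts:   %d\n", tick_wants_resched(6000000, 4000000));
        printf("wakeup preempts: %d\n", wakeup_wants_resched(3000000, 2000000, 110, 120));
        return 0;
}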