author    Ingo Molnar <mingo@elte.hu>  2007-08-09 05:16:51 -0400
committer Ingo Molnar <mingo@elte.hu>  2007-08-09 05:16:51 -0400
commit    bdd4dfa89c1e3e1379729b9edec1526b3ecc25ec (patch)
tree      9bf9002699184a1171522735b2be92bead6df02e
parent    2e1cb74a501c4b1bca5e55dabff24f267349193c (diff)
sched: remove the 'u64 now' local variables
final step: remove all (now superfluous) 'u64 now' variables.

( identity transformation that causes no change in functionality. )

Signed-off-by: Ingo Molnar <mingo@elte.hu>
 kernel/sched.c      | 16 ----------------
 kernel/sched_fair.c |  6 ------
 2 files changed, 0 insertions(+), 22 deletions(-)
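For readers skimming the diff below, here is a minimal stand-in sketch of the pattern being deleted. This is illustrative code, not kernel source: the struct and helper are reduced to the one field that matters, and the *_before/*_after function names are invented for the comparison. Earlier patches in this series had already switched every reader over to rq->clock, so each cached 'u64 now' had become a dead store:

#include <stdint.h>

typedef uint64_t u64;

/* Stand-in runqueue: only the clock field matters for this sketch. */
struct rq {
	u64 clock;
	unsigned long nr_uninterruptible;
};

/* Stand-in: the real update_rq_clock() advances rq->clock from sched_clock(). */
static void update_rq_clock(struct rq *rq)
{
	rq->clock++;
}

/* Before: refresh the clock, then cache it in a local that nothing reads. */
static void activate_task_before(struct rq *rq)
{
	u64 now;

	update_rq_clock(rq);
	now = rq->clock;	/* dead store: no remaining reader of 'now' */
	(void)now;

	rq->nr_uninterruptible--;
}

/* After: just refresh the clock; callees read rq->clock directly. */
static void activate_task_after(struct rq *rq)
{
	update_rq_clock(rq);

	rq->nr_uninterruptible--;
}

Since nothing reads 'now' any more, deleting it is the identity transformation the changelog describes; a compiler would typically have discarded the dead store anyway, so no change in generated code is expected either.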
diff --git a/kernel/sched.c b/kernel/sched.c
index 2dc5d2f7b392..b78b9d9ffd1c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -910,10 +910,7 @@ static int effective_prio(struct task_struct *p)
  */
 static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
 {
-	u64 now;
-
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
@@ -927,10 +924,7 @@ static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  */
 static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
 {
-	u64 now;
-
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	if (p->state == TASK_UNINTERRUPTIBLE)
 		rq->nr_uninterruptible--;
@@ -1647,13 +1641,11 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 	unsigned long flags;
 	struct rq *rq;
 	int this_cpu;
-	u64 now;
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_RUNNING);
 	this_cpu = smp_processor_id(); /* parent's CPU */
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	p->prio = effective_prio(p);
 
@@ -1955,11 +1947,9 @@ static void update_cpu_load(struct rq *this_rq)
 	unsigned long total_load = this_rq->ls.load.weight;
 	unsigned long this_load = total_load;
 	struct load_stat *ls = &this_rq->ls;
-	u64 now;
 	int i, scale;
 
 	__update_rq_clock(this_rq);
-	now = this_rq->clock;
 
 	this_rq->nr_load_updates++;
 	if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
@@ -3431,7 +3421,6 @@ asmlinkage void __sched schedule(void)
 	struct task_struct *prev, *next;
 	long *switch_count;
 	struct rq *rq;
-	u64 now;
 	int cpu;
 
 need_resched:
@@ -3450,7 +3439,6 @@ need_resched_nonpreemptible:
 	spin_lock_irq(&rq->lock);
 	clear_tsk_need_resched(prev);
 	__update_rq_clock(rq);
-	now = rq->clock;
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
@@ -3909,13 +3897,11 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	unsigned long flags;
 	int oldprio, on_rq;
 	struct rq *rq;
-	u64 now;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
 	rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	oldprio = p->prio;
 	on_rq = p->se.on_rq;
@@ -3953,7 +3939,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	int old_prio, delta, on_rq;
 	unsigned long flags;
 	struct rq *rq;
-	u64 now;
 
 	if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
 		return;
@@ -3963,7 +3948,6 @@ void set_user_nice(struct task_struct *p, long nice)
 	 */
 	rq = task_rq_lock(p, &flags);
 	update_rq_clock(rq);
-	now = rq->clock;
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
 	 * allow the 'normal' nice value to be set - but as expected
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 4a2cbde1057f..eb7ca49c3260 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -667,10 +667,8 @@ static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 {
 	struct rq *rq = rq_of(cfs_rq);
 	struct sched_entity *next;
-	u64 now;
 
 	__update_rq_clock(rq);
-	now = rq->clock;
 
 	/*
 	 * Dequeue and enqueue the task to update its
@@ -820,10 +818,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
 static void yield_task_fair(struct rq *rq, struct task_struct *p)
 {
 	struct cfs_rq *cfs_rq = task_cfs_rq(p);
-	u64 now;
 
 	__update_rq_clock(rq);
-	now = rq->clock;
 	/*
 	 * Dequeue and enqueue the task to update its
 	 * position within the tree:
@@ -1062,11 +1058,9 @@ static void set_curr_task_fair(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_entity *se = &curr->se;
-	u64 now;
 	struct cfs_rq *cfs_rq;
 
 	update_rq_clock(rq);
-	now = rq->clock;
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);