path: root/kernel/sched/stats.h
author    Michael S. Tsirkin <mst@redhat.com>  2013-09-22 10:20:54 -0400
committer Ingo Molnar <mingo@kernel.org>       2013-09-25 07:51:06 -0400
commit    4314895165623879937f46d767673654662b570c (patch)
tree      f2927ee7e93b788541dae19af0cb4070ce3d85c4 /kernel/sched/stats.h
parent    f48627e686a69f5215cb0761e731edb3d9859dd9 (diff)
sched: Micro-optimize by dropping unnecessary task_rq() calls
We always know the rq used, let's just pass it around.
This seems to cut the size of scheduler core down a tiny bit:

Before:
  [linux]$ size kernel/sched/core.o.orig
     text    data     bss     dec     hex filename
    62760   16130    3876   82766   1434e kernel/sched/core.o.orig

After:
  [linux]$ size kernel/sched/core.o.patched
     text    data     bss     dec     hex filename
    62566   16130    3876   82572   1428c kernel/sched/core.o.patched

Probably speeds it up as well.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20130922142054.GA11499@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/stats.h')
-rw-r--r--  kernel/sched/stats.h  |  46
1 file changed, 24 insertions(+), 22 deletions(-)
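For illustration, the shape of the change: every sched_info_* helper used to re-derive the runqueue with task_rq(t) and now takes the struct rq * its caller already holds. The sketch below is a self-contained user-space model of that calling convention, not kernel code; struct rq, struct task_struct and the _old/_new helper names are stripped-down stand-ins invented for this example.

#include <stdio.h>

/* Stripped-down stand-ins for the kernel types; illustration only. */
struct rq { unsigned long long clock; };
struct task_struct { struct rq *rq; unsigned long long last_queued; };

/* Models task_rq(): re-derives the runqueue from the task. */
static struct rq *task_rq(struct task_struct *t) { return t->rq; }

/* Pre-patch shape: the helper looks the rq up itself on every call. */
static void sched_info_queued_old(struct task_struct *t)
{
        if (!t->last_queued)
                t->last_queued = task_rq(t)->clock;
}

/* Post-patch shape: the caller, which already knows the rq, passes it in,
 * so the helper does no lookup of its own. */
static void sched_info_queued_new(struct rq *rq, struct task_struct *t)
{
        if (!t->last_queued)
                t->last_queued = rq->clock;
}

int main(void)
{
        struct rq rq = { .clock = 42 };
        struct task_struct p = { .rq = &rq, .last_queued = 0 };

        sched_info_queued_old(&p);       /* old convention: rq derived inside */
        p.last_queued = 0;
        sched_info_queued_new(&rq, &p);  /* new convention: rq passed through */

        printf("last_queued = %llu\n", p.last_queued);
        return 0;
}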
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index c7edee71bce8..4ab704339656 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -59,9 +59,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
  * from dequeue_task() to account for possible rq->clock skew across cpus. The
  * delta taken on each cpu would annul the skew.
  */
-static inline void sched_info_dequeued(struct task_struct *t)
+static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
 {
-        unsigned long long now = rq_clock(task_rq(t)), delta = 0;
+        unsigned long long now = rq_clock(rq), delta = 0;
 
         if (unlikely(sched_info_on()))
                 if (t->sched_info.last_queued)
@@ -69,7 +69,7 @@ static inline void sched_info_dequeued(struct task_struct *t)
         sched_info_reset_dequeued(t);
         t->sched_info.run_delay += delta;
 
-        rq_sched_info_dequeued(task_rq(t), delta);
+        rq_sched_info_dequeued(rq, delta);
 }
 
 /*
@@ -77,9 +77,9 @@ static inline void sched_info_dequeued(struct task_struct *t)
  * long it was waiting to run. We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static void sched_info_arrive(struct task_struct *t)
+static void sched_info_arrive(struct rq *rq, struct task_struct *t)
 {
-        unsigned long long now = rq_clock(task_rq(t)), delta = 0;
+        unsigned long long now = rq_clock(rq), delta = 0;
 
         if (t->sched_info.last_queued)
                 delta = now - t->sched_info.last_queued;
@@ -88,7 +88,7 @@ static void sched_info_arrive(struct task_struct *t)
         t->sched_info.last_arrival = now;
         t->sched_info.pcount++;
 
-        rq_sched_info_arrive(task_rq(t), delta);
+        rq_sched_info_arrive(rq, delta);
 }
 
 /*
@@ -96,11 +96,11 @@ static void sched_info_arrive(struct task_struct *t)
  * the timestamp if it is already not set. It's assumed that
  * sched_info_dequeued() will clear that stamp when appropriate.
  */
-static inline void sched_info_queued(struct task_struct *t)
+static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
 {
         if (unlikely(sched_info_on()))
                 if (!t->sched_info.last_queued)
-                        t->sched_info.last_queued = rq_clock(task_rq(t));
+                        t->sched_info.last_queued = rq_clock(rq);
 }
 
 /*
@@ -111,15 +111,15 @@ static inline void sched_info_queued(struct task_struct *t)
  * sched_info_queued() to mark that it has now again started waiting on
  * the runqueue.
  */
-static inline void sched_info_depart(struct task_struct *t)
+static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
 {
-        unsigned long long delta = rq_clock(task_rq(t)) -
+        unsigned long long delta = rq_clock(rq) -
                         t->sched_info.last_arrival;
 
-        rq_sched_info_depart(task_rq(t), delta);
+        rq_sched_info_depart(rq, delta);
 
         if (t->state == TASK_RUNNING)
-                sched_info_queued(t);
+                sched_info_queued(rq, t);
 }
 
 /*
@@ -128,32 +128,34 @@ static inline void sched_info_depart(struct task_struct *t)
  * the idle task.) We are only called when prev != next.
  */
 static inline void
-__sched_info_switch(struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct rq *rq,
+                    struct task_struct *prev, struct task_struct *next)
 {
-        struct rq *rq = task_rq(prev);
-
         /*
          * prev now departs the cpu. It's not interesting to record
          * stats about how efficient we were at scheduling the idle
          * process, however.
          */
         if (prev != rq->idle)
-                sched_info_depart(prev);
+                sched_info_depart(rq, prev);
 
         if (next != rq->idle)
-                sched_info_arrive(next);
+                sched_info_arrive(rq, next);
 }
 static inline void
-sched_info_switch(struct task_struct *prev, struct task_struct *next)
+sched_info_switch(struct rq *rq,
+                  struct task_struct *prev, struct task_struct *next)
 {
         if (unlikely(sched_info_on()))
-                __sched_info_switch(prev, next);
+                __sched_info_switch(rq, prev, next);
 }
 #else
-#define sched_info_queued(t)            do { } while (0)
+#define sched_info_queued(rq, t)        do { } while (0)
 #define sched_info_reset_dequeued(t)    do { } while (0)
-#define sched_info_dequeued(t)          do { } while (0)
-#define sched_info_switch(t, next)      do { } while (0)
+#define sched_info_dequeued(rq, t)      do { } while (0)
+#define sched_info_depart(rq, t)        do { } while (0)
+#define sched_info_arrive(rq, next)     do { } while (0)
+#define sched_info_switch(rq, t, next)  do { } while (0)
 #endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
 
 /*
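As a side note on why the dropped calls show up in the text size: in the kernel, task_rq(p) is roughly cpu_rq(task_cpu(p)), so each call re-reads the task's cpu number and re-derives the per-cpu runqueue pointer. The toy model below (per-cpu data replaced by a plain array, all names stand-ins for this sketch) shows the work each call repeats and that the patch instead does once in the caller.

#include <stdio.h>

#define NR_CPUS 4

/* Toy model: the per-cpu runqueues become a plain array. */
struct rq { unsigned long long clock; };
struct task_struct { int cpu; };

static struct rq runqueues[NR_CPUS];

static int task_cpu(const struct task_struct *p) { return p->cpu; }
static struct rq *cpu_rq(int cpu) { return &runqueues[cpu]; }

/* Each task_rq() call repeats the cpu -> rq derivation. */
static struct rq *task_rq(const struct task_struct *p)
{
        return cpu_rq(task_cpu(p));
}

int main(void)
{
        struct task_struct p = { .cpu = 2 };
        runqueues[2].clock = 100;

        /* Doing the lookup once and passing rq around, as the patch does
         * for the sched_info_* helpers, avoids re-running task_rq() in
         * every helper called for the same task. */
        struct rq *rq = task_rq(&p);
        printf("clock via cached rq = %llu\n", rq->clock);
        return 0;
}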