author	Dimitri Sivanich <sivanich@sgi.com>	2008-04-18 16:39:00 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-04-21 01:59:51 -0400
commit	833883d9ac4cfb31c1c4419335e68e6895a05b6b (patch)
tree	8266c8216aa988aedb6a25925d99c9e269467e7e /kernel
parent	833df317f9ada91488898b005f4641bb674a3bae (diff)
hrtimer: reduce calls to hrtimer_get_softirq_time()
It seems that hrtimer_run_queues() is calling hrtimer_get_softirq_time() more
often than it needs to. This can cause frequent contention on systems with
large numbers of processors/cores.

With this patch, hrtimer_run_queues() calls hrtimer_get_softirq_time() only
if there is a pending timer in one of the hrtimer bases, and only once.
This also combines hrtimer_run_queues() and the inline run_hrtimer_queue()
into one function.

[ tglx@linutronix.de: coding style ]

Signed-off-by: Dimitri Sivanich <sivanich@sgi.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
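The change amounts to a lazy, at-most-once fetch: skip empty clock bases
without touching shared time state, and read the time only when the first
base with pending work is found. The following minimal, standalone C sketch
illustrates that pattern; expensive_get_time(), run_queues(), and the
clock_base struct here are hypothetical stand-ins for illustration, not the
kernel's actual API.

#include <stdbool.h>
#include <stdio.h>

#define NUM_BASES 2                     /* stands in for HRTIMER_MAX_CLOCK_BASES */

struct clock_base {
        bool has_pending;               /* stands in for base->first != NULL */
};

/*
 * Hypothetical stand-in for hrtimer_get_softirq_time(): an expensive,
 * contended read of shared time state that we want at most once per scan.
 */
static int expensive_get_time(void)
{
        printf("expensive_get_time() called\n");
        return 42;
}

static void run_queues(struct clock_base *bases, int nbases)
{
        int now = 0;
        bool gettime = true;            /* defer the read until first needed */

        for (int i = 0; i < nbases; i++) {
                if (!bases[i].has_pending)
                        continue;       /* empty base: skip, no time read */

                if (gettime) {          /* first busy base: read time once */
                        now = expensive_get_time();
                        gettime = false;
                }
                printf("base %d: expiring timers against now=%d\n", i, now);
        }
}

int main(void)
{
        struct clock_base idle[NUM_BASES] = { { false }, { false } };
        struct clock_base busy[NUM_BASES] = { { true }, { true } };

        run_queues(idle, NUM_BASES);    /* no pending timers: no read at all */
        run_queues(busy, NUM_BASES);    /* exactly one read, shared by both bases */
        return 0;
}

On a system where most ticks find no pending soft timers, this moves the
shared time read (and its associated contention) out of the common path
entirely, which is the effect the commit message describes.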
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/hrtimer.c	64
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index c642ef75069f..70d4adc74639 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1238,51 +1238,51 @@ void hrtimer_run_pending(void)
 /*
  * Called from hardirq context every jiffy
  */
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
-                                     int index)
+void hrtimer_run_queues(void)
 {
         struct rb_node *node;
-        struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+        struct hrtimer_clock_base *base;
+        int index, gettime = 1;
 
-        if (!base->first)
+        if (hrtimer_hres_active())
                 return;
 
-        if (base->get_softirq_time)
-                base->softirq_time = base->get_softirq_time();
-
-        spin_lock(&cpu_base->lock);
+        for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+                base = &cpu_base->clock_base[index];
 
-        while ((node = base->first)) {
-                struct hrtimer *timer;
-
-                timer = rb_entry(node, struct hrtimer, node);
-                if (base->softirq_time.tv64 <= timer->expires.tv64)
-                        break;
-
-                if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                        __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
-                        list_add_tail(&timer->cb_entry,
-                                      &base->cpu_base->cb_pending);
+                if (!base->first)
                         continue;
+
+                if (gettime) {
+                        hrtimer_get_softirq_time(cpu_base);
+                        gettime = 0;
                 }
 
-                __run_hrtimer(timer);
-        }
-        spin_unlock(&cpu_base->lock);
-}
+                if (base->get_softirq_time)
+                        base->softirq_time = base->get_softirq_time();
 
-void hrtimer_run_queues(void)
-{
-        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-        int i;
+                spin_lock(&cpu_base->lock);
 
-        if (hrtimer_hres_active())
-                return;
+                while ((node = base->first)) {
+                        struct hrtimer *timer;
 
-        hrtimer_get_softirq_time(cpu_base);
+                        timer = rb_entry(node, struct hrtimer, node);
+                        if (base->softirq_time.tv64 <= timer->expires.tv64)
+                                break;
 
-        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-                run_hrtimer_queue(cpu_base, i);
+                        if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+                                __remove_hrtimer(timer, base,
+                                                 HRTIMER_STATE_PENDING, 0);
+                                list_add_tail(&timer->cb_entry,
+                                              &base->cpu_base->cb_pending);
+                                continue;
+                        }
+
+                        __run_hrtimer(timer);
+                }
+                spin_unlock(&cpu_base->lock);
+        }
 }
 
 /*