author     Steve French <sfrench@us.ibm.com>   2008-04-24 11:26:50 -0400
committer  Steve French <sfrench@us.ibm.com>   2008-04-24 11:26:50 -0400
commit     36d99df2fb474222ab47fbe8ae7385661033223b
tree       962e068491b752a944f61c454fad3f8619a1ea3f   /kernel/hrtimer.c
parent     076d8423a98659a92837b07aa494cb74bfefe77c
parent     3dc5063786b273f1aee545844f6bd4e9651ebffe
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--   kernel/hrtimer.c | 85
1 files changed, 41 insertions, 44 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 98bee013f71f..f78777abe769 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1238,51 +1238,50 @@ void hrtimer_run_pending(void)
 /*
  * Called from hardirq context every jiffy
  */
-static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
-                                     int index)
+void hrtimer_run_queues(void)
 {
         struct rb_node *node;
-        struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
+        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+        struct hrtimer_clock_base *base;
+        int index, gettime = 1;
 
-        if (!base->first)
+        if (hrtimer_hres_active())
                 return;
 
-        if (base->get_softirq_time)
-                base->softirq_time = base->get_softirq_time();
-
-        spin_lock(&cpu_base->lock);
+        for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
+                base = &cpu_base->clock_base[index];
 
-        while ((node = base->first)) {
-                struct hrtimer *timer;
-
-                timer = rb_entry(node, struct hrtimer, node);
-                if (base->softirq_time.tv64 <= timer->expires.tv64)
-                        break;
-
-                if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-                        __remove_hrtimer(timer, base, HRTIMER_STATE_PENDING, 0);
-                        list_add_tail(&timer->cb_entry,
-                                      &base->cpu_base->cb_pending);
+                if (!base->first)
                         continue;
+
+                if (base->get_softirq_time)
+                        base->softirq_time = base->get_softirq_time();
+                else if (gettime) {
+                        hrtimer_get_softirq_time(cpu_base);
+                        gettime = 0;
                 }
 
-                __run_hrtimer(timer);
-        }
-        spin_unlock(&cpu_base->lock);
-}
+                spin_lock(&cpu_base->lock);
 
-void hrtimer_run_queues(void)
-{
-        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-        int i;
+                while ((node = base->first)) {
+                        struct hrtimer *timer;
 
-        if (hrtimer_hres_active())
-                return;
+                        timer = rb_entry(node, struct hrtimer, node);
+                        if (base->softirq_time.tv64 <= timer->expires.tv64)
+                                break;
 
-        hrtimer_get_softirq_time(cpu_base);
+                        if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
+                                __remove_hrtimer(timer, base,
+                                                 HRTIMER_STATE_PENDING, 0);
+                                list_add_tail(&timer->cb_entry,
+                                              &base->cpu_base->cb_pending);
+                                continue;
+                        }
 
-        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-                run_hrtimer_queue(cpu_base, i);
+                        __run_hrtimer(timer);
+                }
+                spin_unlock(&cpu_base->lock);
+        }
 }
 
 /*
@@ -1354,13 +1353,13 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
         struct hrtimer_sleeper t;
         struct timespec __user *rmtp;
 
-        hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS);
-        t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2;
+        hrtimer_init(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS);
+        t.timer.expires.tv64 = restart->nanosleep.expires;
 
         if (do_nanosleep(&t, HRTIMER_MODE_ABS))
                 return 0;
 
-        rmtp = (struct timespec __user *)restart->arg1;
+        rmtp = restart->nanosleep.rmtp;
         if (rmtp) {
                 int ret = update_rmtp(&t.timer, rmtp);
                 if (ret <= 0)
@@ -1394,10 +1393,9 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 
         restart = &current_thread_info()->restart_block;
         restart->fn = hrtimer_nanosleep_restart;
-        restart->arg0 = (unsigned long) t.timer.base->index;
-        restart->arg1 = (unsigned long) rmtp;
-        restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF;
-        restart->arg3 = t.timer.expires.tv64 >> 32;
+        restart->nanosleep.index = t.timer.base->index;
+        restart->nanosleep.rmtp = rmtp;
+        restart->nanosleep.expires = t.timer.expires.tv64;
 
         return -ERESTART_RESTARTBLOCK;
 }
@@ -1425,7 +1423,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
         int i;
 
         spin_lock_init(&cpu_base->lock);
-        lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                 cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1466,16 +1463,16 @@ static void migrate_hrtimers(int cpu)
         tick_cancel_sched_timer(cpu);
 
         local_irq_disable();
-        double_spin_lock(&new_base->lock, &old_base->lock,
-                         smp_processor_id() < cpu);
+        spin_lock(&new_base->lock);
+        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                 migrate_hrtimer_list(&old_base->clock_base[i],
                                      &new_base->clock_base[i]);
         }
 
-        double_spin_unlock(&new_base->lock, &old_base->lock,
-                           smp_processor_id() < cpu);
+        spin_unlock(&old_base->lock);
+        spin_unlock(&new_base->lock);
         local_irq_enable();
         put_cpu_var(hrtimer_bases);
 }