diff options
| -rw-r--r-- | Documentation/00-INDEX | 6 | ||||
| -rw-r--r-- | Documentation/timers/highres.txt (renamed from Documentation/hrtimers/highres.txt) | 0 | ||||
| -rw-r--r-- | Documentation/timers/hrtimers.txt (renamed from Documentation/hrtimers/hrtimers.txt) | 0 | ||||
| -rw-r--r-- | Documentation/timers/timer_stats.txt (renamed from Documentation/hrtimer/timer_stats.txt) | 0 | ||||
| -rw-r--r-- | include/linux/hrtimer.h | 2 | ||||
| -rw-r--r-- | include/linux/spinlock.h | 37 | ||||
| -rw-r--r-- | include/linux/thread_info.h | 12 | ||||
| -rw-r--r-- | kernel/compat.c | 15 | ||||
| -rw-r--r-- | kernel/hrtimer.c | 22 | ||||
| -rw-r--r-- | kernel/posix-cpu-timers.c | 30 | ||||
| -rw-r--r-- | kernel/time/clocksource.c | 18 | ||||
| -rw-r--r-- | kernel/time/tick-sched.c | 6 | ||||
| -rw-r--r-- | kernel/timer.c | 16 | ||||
| -rw-r--r-- | kernel/workqueue.c | 2 |
14 files changed, 68 insertions, 98 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX index e8fb24671967..f7923a42e769 100644 --- a/Documentation/00-INDEX +++ b/Documentation/00-INDEX | |||
| @@ -167,10 +167,8 @@ highuid.txt | |||
| 167 | - notes on the change from 16 bit to 32 bit user/group IDs. | 167 | - notes on the change from 16 bit to 32 bit user/group IDs. |
| 168 | hpet.txt | 168 | hpet.txt |
| 169 | - High Precision Event Timer Driver for Linux. | 169 | - High Precision Event Timer Driver for Linux. |
| 170 | hrtimer/ | 170 | timers/ |
| 171 | - info on the timer_stats debugging facility for timer (ab)use. | 171 | - info on the timer related topics |
| 172 | hrtimers/ | ||
| 173 | - info on the hrtimers subsystem for high-resolution kernel timers. | ||
| 174 | hw_random.txt | 172 | hw_random.txt |
| 175 | - info on Linux support for random number generator in i8xx chipsets. | 173 | - info on Linux support for random number generator in i8xx chipsets. |
| 176 | hwmon/ | 174 | hwmon/ |
diff --git a/Documentation/hrtimers/highres.txt b/Documentation/timers/highres.txt index a73ecf5b4bdb..a73ecf5b4bdb 100644 --- a/Documentation/hrtimers/highres.txt +++ b/Documentation/timers/highres.txt | |||
diff --git a/Documentation/hrtimers/hrtimers.txt b/Documentation/timers/hrtimers.txt index ce31f65e12e7..ce31f65e12e7 100644 --- a/Documentation/hrtimers/hrtimers.txt +++ b/Documentation/timers/hrtimers.txt | |||
diff --git a/Documentation/hrtimer/timer_stats.txt b/Documentation/timers/timer_stats.txt index 20d368c59814..20d368c59814 100644 --- a/Documentation/hrtimer/timer_stats.txt +++ b/Documentation/timers/timer_stats.txt | |||
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 1ad56a7b2f74..56f3236da829 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
| @@ -173,7 +173,6 @@ struct hrtimer_clock_base { | |||
| 173 | * struct hrtimer_cpu_base - the per cpu clock bases | 173 | * struct hrtimer_cpu_base - the per cpu clock bases |
| 174 | * @lock: lock protecting the base and associated clock bases | 174 | * @lock: lock protecting the base and associated clock bases |
| 175 | * and timers | 175 | * and timers |
| 176 | * @lock_key: the lock_class_key for use with lockdep | ||
| 177 | * @clock_base: array of clock bases for this cpu | 176 | * @clock_base: array of clock bases for this cpu |
| 178 | * @curr_timer: the timer which is executing a callback right now | 177 | * @curr_timer: the timer which is executing a callback right now |
| 179 | * @expires_next: absolute time of the next event which was scheduled | 178 | * @expires_next: absolute time of the next event which was scheduled |
| @@ -189,7 +188,6 @@ struct hrtimer_clock_base { | |||
| 189 | */ | 188 | */ |
| 190 | struct hrtimer_cpu_base { | 189 | struct hrtimer_cpu_base { |
| 191 | spinlock_t lock; | 190 | spinlock_t lock; |
| 192 | struct lock_class_key lock_key; | ||
| 193 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 191 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
| 194 | struct list_head cb_pending; | 192 | struct list_head cb_pending; |
| 195 | #ifdef CONFIG_HIGH_RES_TIMERS | 193 | #ifdef CONFIG_HIGH_RES_TIMERS |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 1129ee0a7180..d311a090fae7 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -296,43 +296,6 @@ do { \ | |||
| 296 | }) | 296 | }) |
| 297 | 297 | ||
| 298 | /* | 298 | /* |
| 299 | * Locks two spinlocks l1 and l2. | ||
| 300 | * l1_first indicates if spinlock l1 should be taken first. | ||
| 301 | */ | ||
| 302 | static inline void double_spin_lock(spinlock_t *l1, spinlock_t *l2, | ||
| 303 | bool l1_first) | ||
| 304 | __acquires(l1) | ||
| 305 | __acquires(l2) | ||
| 306 | { | ||
| 307 | if (l1_first) { | ||
| 308 | spin_lock(l1); | ||
| 309 | spin_lock(l2); | ||
| 310 | } else { | ||
| 311 | spin_lock(l2); | ||
| 312 | spin_lock(l1); | ||
| 313 | } | ||
| 314 | } | ||
| 315 | |||
| 316 | /* | ||
| 317 | * Unlocks two spinlocks l1 and l2. | ||
| 318 | * l1_taken_first indicates if spinlock l1 was taken first and therefore | ||
| 319 | * should be released after spinlock l2. | ||
| 320 | */ | ||
| 321 | static inline void double_spin_unlock(spinlock_t *l1, spinlock_t *l2, | ||
| 322 | bool l1_taken_first) | ||
| 323 | __releases(l1) | ||
| 324 | __releases(l2) | ||
| 325 | { | ||
| 326 | if (l1_taken_first) { | ||
| 327 | spin_unlock(l2); | ||
| 328 | spin_unlock(l1); | ||
| 329 | } else { | ||
| 330 | spin_unlock(l1); | ||
| 331 | spin_unlock(l2); | ||
| 332 | } | ||
| 333 | } | ||
| 334 | |||
| 335 | /* | ||
| 336 | * Pull the atomic_t declaration: | 299 | * Pull the atomic_t declaration: |
| 337 | * (asm-mips/atomic.h needs above definitions) | 300 | * (asm-mips/atomic.h needs above definitions) |
| 338 | */ | 301 | */ |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 421323e5a2d6..accd7bad35b0 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
| @@ -9,6 +9,9 @@ | |||
| 9 | 9 | ||
| 10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
| 11 | 11 | ||
| 12 | struct timespec; | ||
| 13 | struct compat_timespec; | ||
| 14 | |||
| 12 | /* | 15 | /* |
| 13 | * System call restart block. | 16 | * System call restart block. |
| 14 | */ | 17 | */ |
| @@ -26,6 +29,15 @@ struct restart_block { | |||
| 26 | u32 bitset; | 29 | u32 bitset; |
| 27 | u64 time; | 30 | u64 time; |
| 28 | } futex; | 31 | } futex; |
| 32 | /* For nanosleep */ | ||
| 33 | struct { | ||
| 34 | clockid_t index; | ||
| 35 | struct timespec __user *rmtp; | ||
| 36 | #ifdef CONFIG_COMPAT | ||
| 37 | struct compat_timespec __user *compat_rmtp; | ||
| 38 | #endif | ||
| 39 | u64 expires; | ||
| 40 | } nanosleep; | ||
| 29 | }; | 41 | }; |
| 30 | }; | 42 | }; |
| 31 | 43 | ||
diff --git a/kernel/compat.c b/kernel/compat.c index 5f0e201bcfd3..9c48abfcd4a5 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
| @@ -47,15 +47,14 @@ static long compat_nanosleep_restart(struct restart_block *restart) | |||
| 47 | mm_segment_t oldfs; | 47 | mm_segment_t oldfs; |
| 48 | long ret; | 48 | long ret; |
| 49 | 49 | ||
| 50 | rmtp = (struct compat_timespec __user *)(restart->arg1); | 50 | restart->nanosleep.rmtp = (struct timespec __user *) &rmt; |
| 51 | restart->arg1 = (unsigned long)&rmt; | ||
| 52 | oldfs = get_fs(); | 51 | oldfs = get_fs(); |
| 53 | set_fs(KERNEL_DS); | 52 | set_fs(KERNEL_DS); |
| 54 | ret = hrtimer_nanosleep_restart(restart); | 53 | ret = hrtimer_nanosleep_restart(restart); |
| 55 | set_fs(oldfs); | 54 | set_fs(oldfs); |
| 56 | 55 | ||
| 57 | if (ret) { | 56 | if (ret) { |
| 58 | restart->arg1 = (unsigned long)rmtp; | 57 | rmtp = restart->nanosleep.compat_rmtp; |
| 59 | 58 | ||
| 60 | if (rmtp && put_compat_timespec(&rmt, rmtp)) | 59 | if (rmtp && put_compat_timespec(&rmt, rmtp)) |
| 61 | return -EFAULT; | 60 | return -EFAULT; |
| @@ -89,7 +88,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp, | |||
| 89 | = &current_thread_info()->restart_block; | 88 | = &current_thread_info()->restart_block; |
| 90 | 89 | ||
| 91 | restart->fn = compat_nanosleep_restart; | 90 | restart->fn = compat_nanosleep_restart; |
| 92 | restart->arg1 = (unsigned long)rmtp; | 91 | restart->nanosleep.compat_rmtp = rmtp; |
| 93 | 92 | ||
| 94 | if (rmtp && put_compat_timespec(&rmt, rmtp)) | 93 | if (rmtp && put_compat_timespec(&rmt, rmtp)) |
| 95 | return -EFAULT; | 94 | return -EFAULT; |
| @@ -607,9 +606,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) | |||
| 607 | long err; | 606 | long err; |
| 608 | mm_segment_t oldfs; | 607 | mm_segment_t oldfs; |
| 609 | struct timespec tu; | 608 | struct timespec tu; |
| 610 | struct compat_timespec *rmtp = (struct compat_timespec *)(restart->arg1); | 609 | struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp; |
| 611 | 610 | ||
| 612 | restart->arg1 = (unsigned long) &tu; | 611 | restart->nanosleep.rmtp = (struct timespec __user *) &tu; |
| 613 | oldfs = get_fs(); | 612 | oldfs = get_fs(); |
| 614 | set_fs(KERNEL_DS); | 613 | set_fs(KERNEL_DS); |
| 615 | err = clock_nanosleep_restart(restart); | 614 | err = clock_nanosleep_restart(restart); |
| @@ -621,7 +620,7 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart) | |||
| 621 | 620 | ||
| 622 | if (err == -ERESTART_RESTARTBLOCK) { | 621 | if (err == -ERESTART_RESTARTBLOCK) { |
| 623 | restart->fn = compat_clock_nanosleep_restart; | 622 | restart->fn = compat_clock_nanosleep_restart; |
| 624 | restart->arg1 = (unsigned long) rmtp; | 623 | restart->nanosleep.compat_rmtp = rmtp; |
| 625 | } | 624 | } |
| 626 | return err; | 625 | return err; |
| 627 | } | 626 | } |
| @@ -652,7 +651,7 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, | |||
| 652 | if (err == -ERESTART_RESTARTBLOCK) { | 651 | if (err == -ERESTART_RESTARTBLOCK) { |
| 653 | restart = &current_thread_info()->restart_block; | 652 | restart = &current_thread_info()->restart_block; |
| 654 | restart->fn = compat_clock_nanosleep_restart; | 653 | restart->fn = compat_clock_nanosleep_restart; |
| 655 | restart->arg1 = (unsigned long) rmtp; | 654 | restart->nanosleep.compat_rmtp = rmtp; |
| 656 | } | 655 | } |
| 657 | return err; | 656 | return err; |
| 658 | } | 657 | } |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 98bee013f71f..c642ef75069f 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
| @@ -1354,13 +1354,13 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) | |||
| 1354 | struct hrtimer_sleeper t; | 1354 | struct hrtimer_sleeper t; |
| 1355 | struct timespec __user *rmtp; | 1355 | struct timespec __user *rmtp; |
| 1356 | 1356 | ||
| 1357 | hrtimer_init(&t.timer, restart->arg0, HRTIMER_MODE_ABS); | 1357 | hrtimer_init(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS); |
| 1358 | t.timer.expires.tv64 = ((u64)restart->arg3 << 32) | (u64) restart->arg2; | 1358 | t.timer.expires.tv64 = restart->nanosleep.expires; |
| 1359 | 1359 | ||
| 1360 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) | 1360 | if (do_nanosleep(&t, HRTIMER_MODE_ABS)) |
| 1361 | return 0; | 1361 | return 0; |
| 1362 | 1362 | ||
| 1363 | rmtp = (struct timespec __user *)restart->arg1; | 1363 | rmtp = restart->nanosleep.rmtp; |
| 1364 | if (rmtp) { | 1364 | if (rmtp) { |
| 1365 | int ret = update_rmtp(&t.timer, rmtp); | 1365 | int ret = update_rmtp(&t.timer, rmtp); |
| 1366 | if (ret <= 0) | 1366 | if (ret <= 0) |
| @@ -1394,10 +1394,9 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | |||
| 1394 | 1394 | ||
| 1395 | restart = &current_thread_info()->restart_block; | 1395 | restart = &current_thread_info()->restart_block; |
| 1396 | restart->fn = hrtimer_nanosleep_restart; | 1396 | restart->fn = hrtimer_nanosleep_restart; |
| 1397 | restart->arg0 = (unsigned long) t.timer.base->index; | 1397 | restart->nanosleep.index = t.timer.base->index; |
| 1398 | restart->arg1 = (unsigned long) rmtp; | 1398 | restart->nanosleep.rmtp = rmtp; |
| 1399 | restart->arg2 = t.timer.expires.tv64 & 0xFFFFFFFF; | 1399 | restart->nanosleep.expires = t.timer.expires.tv64; |
| 1400 | restart->arg3 = t.timer.expires.tv64 >> 32; | ||
| 1401 | 1400 | ||
| 1402 | return -ERESTART_RESTARTBLOCK; | 1401 | return -ERESTART_RESTARTBLOCK; |
| 1403 | } | 1402 | } |
| @@ -1425,7 +1424,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | |||
| 1425 | int i; | 1424 | int i; |
| 1426 | 1425 | ||
| 1427 | spin_lock_init(&cpu_base->lock); | 1426 | spin_lock_init(&cpu_base->lock); |
| 1428 | lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key); | ||
| 1429 | 1427 | ||
| 1430 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) | 1428 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) |
| 1431 | cpu_base->clock_base[i].cpu_base = cpu_base; | 1429 | cpu_base->clock_base[i].cpu_base = cpu_base; |
| @@ -1466,16 +1464,16 @@ static void migrate_hrtimers(int cpu) | |||
| 1466 | tick_cancel_sched_timer(cpu); | 1464 | tick_cancel_sched_timer(cpu); |
| 1467 | 1465 | ||
| 1468 | local_irq_disable(); | 1466 | local_irq_disable(); |
| 1469 | double_spin_lock(&new_base->lock, &old_base->lock, | 1467 | spin_lock(&new_base->lock); |
| 1470 | smp_processor_id() < cpu); | 1468 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
| 1471 | 1469 | ||
| 1472 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1470 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
| 1473 | migrate_hrtimer_list(&old_base->clock_base[i], | 1471 | migrate_hrtimer_list(&old_base->clock_base[i], |
| 1474 | &new_base->clock_base[i]); | 1472 | &new_base->clock_base[i]); |
| 1475 | } | 1473 | } |
| 1476 | 1474 | ||
| 1477 | double_spin_unlock(&new_base->lock, &old_base->lock, | 1475 | spin_unlock(&old_base->lock); |
| 1478 | smp_processor_id() < cpu); | 1476 | spin_unlock(&new_base->lock); |
| 1479 | local_irq_enable(); | 1477 | local_irq_enable(); |
| 1480 | put_cpu_var(hrtimer_bases); | 1478 | put_cpu_var(hrtimer_bases); |
| 1481 | } | 1479 | } |
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 2eae91f954ca..ae5c6c147c4b 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -1087,45 +1087,45 @@ static void check_process_timers(struct task_struct *tsk, | |||
| 1087 | maxfire = 20; | 1087 | maxfire = 20; |
| 1088 | prof_expires = cputime_zero; | 1088 | prof_expires = cputime_zero; |
| 1089 | while (!list_empty(timers)) { | 1089 | while (!list_empty(timers)) { |
| 1090 | struct cpu_timer_list *t = list_first_entry(timers, | 1090 | struct cpu_timer_list *tl = list_first_entry(timers, |
| 1091 | struct cpu_timer_list, | 1091 | struct cpu_timer_list, |
| 1092 | entry); | 1092 | entry); |
| 1093 | if (!--maxfire || cputime_lt(ptime, t->expires.cpu)) { | 1093 | if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) { |
| 1094 | prof_expires = t->expires.cpu; | 1094 | prof_expires = tl->expires.cpu; |
| 1095 | break; | 1095 | break; |
| 1096 | } | 1096 | } |
| 1097 | t->firing = 1; | 1097 | tl->firing = 1; |
| 1098 | list_move_tail(&t->entry, firing); | 1098 | list_move_tail(&tl->entry, firing); |
| 1099 | } | 1099 | } |
| 1100 | 1100 | ||
| 1101 | ++timers; | 1101 | ++timers; |
| 1102 | maxfire = 20; | 1102 | maxfire = 20; |
| 1103 | virt_expires = cputime_zero; | 1103 | virt_expires = cputime_zero; |
| 1104 | while (!list_empty(timers)) { | 1104 | while (!list_empty(timers)) { |
| 1105 | struct cpu_timer_list *t = list_first_entry(timers, | 1105 | struct cpu_timer_list *tl = list_first_entry(timers, |
| 1106 | struct cpu_timer_list, | 1106 | struct cpu_timer_list, |
| 1107 | entry); | 1107 | entry); |
| 1108 | if (!--maxfire || cputime_lt(utime, t->expires.cpu)) { | 1108 | if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) { |
| 1109 | virt_expires = t->expires.cpu; | 1109 | virt_expires = tl->expires.cpu; |
| 1110 | break; | 1110 | break; |
| 1111 | } | 1111 | } |
| 1112 | t->firing = 1; | 1112 | tl->firing = 1; |
| 1113 | list_move_tail(&t->entry, firing); | 1113 | list_move_tail(&tl->entry, firing); |
| 1114 | } | 1114 | } |
| 1115 | 1115 | ||
| 1116 | ++timers; | 1116 | ++timers; |
| 1117 | maxfire = 20; | 1117 | maxfire = 20; |
| 1118 | sched_expires = 0; | 1118 | sched_expires = 0; |
| 1119 | while (!list_empty(timers)) { | 1119 | while (!list_empty(timers)) { |
| 1120 | struct cpu_timer_list *t = list_first_entry(timers, | 1120 | struct cpu_timer_list *tl = list_first_entry(timers, |
| 1121 | struct cpu_timer_list, | 1121 | struct cpu_timer_list, |
| 1122 | entry); | 1122 | entry); |
| 1123 | if (!--maxfire || sum_sched_runtime < t->expires.sched) { | 1123 | if (!--maxfire || sum_sched_runtime < tl->expires.sched) { |
| 1124 | sched_expires = t->expires.sched; | 1124 | sched_expires = tl->expires.sched; |
| 1125 | break; | 1125 | break; |
| 1126 | } | 1126 | } |
| 1127 | t->firing = 1; | 1127 | tl->firing = 1; |
| 1128 | list_move_tail(&t->entry, firing); | 1128 | list_move_tail(&tl->entry, firing); |
| 1129 | } | 1129 | } |
| 1130 | 1130 | ||
| 1131 | /* | 1131 | /* |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index f61402b1f2d0..73961f35fdc8 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
| @@ -141,8 +141,16 @@ static void clocksource_watchdog(unsigned long data) | |||
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | if (!list_empty(&watchdog_list)) { | 143 | if (!list_empty(&watchdog_list)) { |
| 144 | __mod_timer(&watchdog_timer, | 144 | /* |
| 145 | watchdog_timer.expires + WATCHDOG_INTERVAL); | 145 | * Cycle through CPUs to check if the CPUs stay |
| 146 | * synchronized to each other. | ||
| 147 | */ | ||
| 148 | int next_cpu = next_cpu(raw_smp_processor_id(), cpu_online_map); | ||
| 149 | |||
| 150 | if (next_cpu >= NR_CPUS) | ||
| 151 | next_cpu = first_cpu(cpu_online_map); | ||
| 152 | watchdog_timer.expires += WATCHDOG_INTERVAL; | ||
| 153 | add_timer_on(&watchdog_timer, next_cpu); | ||
| 146 | } | 154 | } |
| 147 | spin_unlock(&watchdog_lock); | 155 | spin_unlock(&watchdog_lock); |
| 148 | } | 156 | } |
| @@ -164,7 +172,8 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
| 164 | if (!started && watchdog) { | 172 | if (!started && watchdog) { |
| 165 | watchdog_last = watchdog->read(); | 173 | watchdog_last = watchdog->read(); |
| 166 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; | 174 | watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; |
| 167 | add_timer(&watchdog_timer); | 175 | add_timer_on(&watchdog_timer, |
| 176 | first_cpu(cpu_online_map)); | ||
| 168 | } | 177 | } |
| 169 | } else { | 178 | } else { |
| 170 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | 179 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
| @@ -185,7 +194,8 @@ static void clocksource_check_watchdog(struct clocksource *cs) | |||
| 185 | watchdog_last = watchdog->read(); | 194 | watchdog_last = watchdog->read(); |
| 186 | watchdog_timer.expires = | 195 | watchdog_timer.expires = |
| 187 | jiffies + WATCHDOG_INTERVAL; | 196 | jiffies + WATCHDOG_INTERVAL; |
| 188 | add_timer(&watchdog_timer); | 197 | add_timer_on(&watchdog_timer, |
| 198 | first_cpu(cpu_online_map)); | ||
| 189 | } | 199 | } |
| 190 | } | 200 | } |
| 191 | } | 201 | } |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 686da821d376..69dba0c71727 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -158,9 +158,8 @@ void tick_nohz_stop_idle(int cpu) | |||
| 158 | } | 158 | } |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | static ktime_t tick_nohz_start_idle(int cpu) | 161 | static ktime_t tick_nohz_start_idle(struct tick_sched *ts) |
| 162 | { | 162 | { |
| 163 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | ||
| 164 | ktime_t now, delta; | 163 | ktime_t now, delta; |
| 165 | 164 | ||
| 166 | now = ktime_get(); | 165 | now = ktime_get(); |
| @@ -201,8 +200,8 @@ void tick_nohz_stop_sched_tick(void) | |||
| 201 | local_irq_save(flags); | 200 | local_irq_save(flags); |
| 202 | 201 | ||
| 203 | cpu = smp_processor_id(); | 202 | cpu = smp_processor_id(); |
| 204 | now = tick_nohz_start_idle(cpu); | ||
| 205 | ts = &per_cpu(tick_cpu_sched, cpu); | 203 | ts = &per_cpu(tick_cpu_sched, cpu); |
| 204 | now = tick_nohz_start_idle(ts); | ||
| 206 | 205 | ||
| 207 | /* | 206 | /* |
| 208 | * If this cpu is offline and it is the one which updates | 207 | * If this cpu is offline and it is the one which updates |
| @@ -222,7 +221,6 @@ void tick_nohz_stop_sched_tick(void) | |||
| 222 | if (need_resched()) | 221 | if (need_resched()) |
| 223 | goto end; | 222 | goto end; |
| 224 | 223 | ||
| 225 | cpu = smp_processor_id(); | ||
| 226 | if (unlikely(local_softirq_pending())) { | 224 | if (unlikely(local_softirq_pending())) { |
| 227 | static int ratelimit; | 225 | static int ratelimit; |
| 228 | 226 | ||
diff --git a/kernel/timer.c b/kernel/timer.c index b024106daa70..f3d35d4ea42e 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
| @@ -1228,13 +1228,6 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info) | |||
| 1228 | return 0; | 1228 | return 0; |
| 1229 | } | 1229 | } |
| 1230 | 1230 | ||
| 1231 | /* | ||
| 1232 | * lockdep: we want to track each per-CPU base as a separate lock-class, | ||
| 1233 | * but timer-bases are kmalloc()-ed, so we need to attach separate | ||
| 1234 | * keys to them: | ||
| 1235 | */ | ||
| 1236 | static struct lock_class_key base_lock_keys[NR_CPUS]; | ||
| 1237 | |||
| 1238 | static int __cpuinit init_timers_cpu(int cpu) | 1231 | static int __cpuinit init_timers_cpu(int cpu) |
| 1239 | { | 1232 | { |
| 1240 | int j; | 1233 | int j; |
| @@ -1277,7 +1270,6 @@ static int __cpuinit init_timers_cpu(int cpu) | |||
| 1277 | } | 1270 | } |
| 1278 | 1271 | ||
| 1279 | spin_lock_init(&base->lock); | 1272 | spin_lock_init(&base->lock); |
| 1280 | lockdep_set_class(&base->lock, base_lock_keys + cpu); | ||
| 1281 | 1273 | ||
| 1282 | for (j = 0; j < TVN_SIZE; j++) { | 1274 | for (j = 0; j < TVN_SIZE; j++) { |
| 1283 | INIT_LIST_HEAD(base->tv5.vec + j); | 1275 | INIT_LIST_HEAD(base->tv5.vec + j); |
| @@ -1316,8 +1308,8 @@ static void __cpuinit migrate_timers(int cpu) | |||
| 1316 | new_base = get_cpu_var(tvec_bases); | 1308 | new_base = get_cpu_var(tvec_bases); |
| 1317 | 1309 | ||
| 1318 | local_irq_disable(); | 1310 | local_irq_disable(); |
| 1319 | double_spin_lock(&new_base->lock, &old_base->lock, | 1311 | spin_lock(&new_base->lock); |
| 1320 | smp_processor_id() < cpu); | 1312 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
| 1321 | 1313 | ||
| 1322 | BUG_ON(old_base->running_timer); | 1314 | BUG_ON(old_base->running_timer); |
| 1323 | 1315 | ||
| @@ -1330,8 +1322,8 @@ static void __cpuinit migrate_timers(int cpu) | |||
| 1330 | migrate_timer_list(new_base, old_base->tv5.vec + i); | 1322 | migrate_timer_list(new_base, old_base->tv5.vec + i); |
| 1331 | } | 1323 | } |
| 1332 | 1324 | ||
| 1333 | double_spin_unlock(&new_base->lock, &old_base->lock, | 1325 | spin_unlock(&old_base->lock); |
| 1334 | smp_processor_id() < cpu); | 1326 | spin_unlock(&new_base->lock); |
| 1335 | local_irq_enable(); | 1327 | local_irq_enable(); |
| 1336 | put_cpu_var(tvec_bases); | 1328 | put_cpu_var(tvec_bases); |
| 1337 | } | 1329 | } |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index ff06611655af..00ff4d08e370 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -219,6 +219,7 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
| 219 | struct timer_list *timer = &dwork->timer; | 219 | struct timer_list *timer = &dwork->timer; |
| 220 | struct work_struct *work = &dwork->work; | 220 | struct work_struct *work = &dwork->work; |
| 221 | 221 | ||
| 222 | timer_stats_timer_set_start_info(&dwork->timer); | ||
| 222 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { | 223 | if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) { |
| 223 | BUG_ON(timer_pending(timer)); | 224 | BUG_ON(timer_pending(timer)); |
| 224 | BUG_ON(!list_empty(&work->entry)); | 225 | BUG_ON(!list_empty(&work->entry)); |
| @@ -580,6 +581,7 @@ EXPORT_SYMBOL(schedule_delayed_work); | |||
| 580 | int schedule_delayed_work_on(int cpu, | 581 | int schedule_delayed_work_on(int cpu, |
| 581 | struct delayed_work *dwork, unsigned long delay) | 582 | struct delayed_work *dwork, unsigned long delay) |
| 582 | { | 583 | { |
| 584 | timer_stats_timer_set_start_info(&dwork->timer); | ||
| 583 | return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); | 585 | return queue_delayed_work_on(cpu, keventd_wq, dwork, delay); |
| 584 | } | 586 | } |
| 585 | EXPORT_SYMBOL(schedule_delayed_work_on); | 587 | EXPORT_SYMBOL(schedule_delayed_work_on); |
