 include/linux/sched.h |  4 ++++
 kernel/futex.c        | 30 +++++++++++++++++++++++++++---
 kernel/kgdb.c         |  6 +++---
 kernel/softlockup.c   | 15 +++++++++++++++
 4 files changed, 49 insertions(+), 6 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abdfacc58653..78efe7c485ac 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -310,6 +310,7 @@ extern void sched_show_task(struct task_struct *p);
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 extern void softlockup_tick(void);
 extern void touch_softlockup_watchdog(void);
+extern void touch_softlockup_watchdog_sync(void);
 extern void touch_all_softlockup_watchdogs(void);
 extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
                                     void __user *buffer,
@@ -323,6 +324,9 @@ static inline void softlockup_tick(void)
 static inline void touch_softlockup_watchdog(void)
 {
 }
+static inline void touch_softlockup_watchdog_sync(void)
+{
+}
 static inline void touch_all_softlockup_watchdogs(void)
 {
 }
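
The header change is the entire new API surface: one extra "touch" entry point, plus the usual empty stub for !CONFIG_DETECT_SOFTLOCKUP builds. A hypothetical call site (names invented here; the real callers are the kernel/kgdb.c hunks below) shows the intended division of labor:

        /*
         * Hypothetical caller, illustration only: a path that has kept
         * interrupts off for a long time must assume the per-CPU sched
         * clock is stale, so it uses the _sync() variant, which also
         * asks the next watchdog tick to resync that clock.
         */
        static void debugger_resume(unsigned long flags)
        {
                touch_softlockup_watchdog_sync(); /* rearm watchdog, resync clock */
                clocksource_touch_watchdog();     /* keep clocksource watchdog quiet */
                local_irq_restore(flags);
        }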
diff --git a/kernel/futex.c b/kernel/futex.c
index d9b3a2228f9d..e7a35f1039e7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -530,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
                         return -EINVAL;
 
                 WARN_ON(!atomic_read(&pi_state->refcount));
-                WARN_ON(pid && pi_state->owner &&
-                                pi_state->owner->pid != pid);
+
+                /*
+                 * When pi_state->owner is NULL then the owner died
+                 * and another waiter is on the fly. pi_state->owner
+                 * is fixed up by the task which acquires
+                 * pi_state->rt_mutex.
+                 *
+                 * We do not check for pid == 0 which can happen when
+                 * the owner died and robust_list_exit() cleared the
+                 * TID.
+                 */
+                if (pid && pi_state->owner) {
+                        /*
+                         * Bail out if user space manipulated the
+                         * futex value.
+                         */
+                        if (pid != task_pid_vnr(pi_state->owner))
+                                return -EINVAL;
+                }
 
                 atomic_inc(&pi_state->refcount);
                 *ps = pi_state;
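
The dropped WARN_ON() was triggerable from user space: the futex word lives in ordinary user memory, so a buggy or hostile program can store any TID it likes into a contended PI futex. The new code treats a mismatch as corruption and fails the lookup with -EINVAL instead of splatting the kernel log; note it now also compares against task_pid_vnr(), the namespace-relative TID, rather than the raw pi_state->owner->pid. A minimal userspace sketch of the premise (this demo trips the unlock path's related TID check, which reports EPERM; the -EINVAL in this hunk covers the waiter-lookup side):

        #include <errno.h>
        #include <linux/futex.h>
        #include <stdatomic.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>
        #include <sys/syscall.h>
        #include <unistd.h>

        static _Atomic uint32_t futex_word;

        int main(void)
        {
                uint32_t zero = 0;
                uint32_t tid = (uint32_t)syscall(SYS_gettid);

                /* Uncontended FUTEX_LOCK_PI fast path: CAS our TID in. */
                atomic_compare_exchange_strong(&futex_word, &zero, tid);

                /* "User space manipulated the futex value": bogus TID. */
                atomic_store(&futex_word, tid + 1);

                /* The kernel can only reject the inconsistent word. */
                if (syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI,
                            0, NULL, NULL, 0) == -1)
                        printf("unlock refused: %s\n", strerror(errno));
                return 0;
        }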
@@ -758,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
         if (!pi_state)
                 return -EINVAL;
 
+        /*
+         * If current does not own the pi_state then the futex is
+         * inconsistent and user space fiddled with the futex value.
+         */
+        if (pi_state->owner != current)
+                return -EINVAL;
+
         raw_spin_lock(&pi_state->pi_mutex.wait_lock);
         new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
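
wake_futex_pi() is only reached from futex_unlock_pi(), which has already compared the user-space word against the caller's TID, but that word is precisely what cannot be trusted here. The kernel's own pi_state is the authoritative record, so the new check re-verifies ownership against it before handing the rt_mutex to the next waiter. Abridged paraphrase of the calling context in kernels of this era (not verbatim):

        if (get_user(uval, uaddr))
                return -EFAULT;
        if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
                return -EPERM;  /* the word claims we are not the owner */
        ...
        /* a forged word sails past the check above, so wake_futex_pi()
         * must validate pi_state->owner independently */
        ret = wake_futex_pi(uaddr, uval, this);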
@@ -1971,7 +1995,7 @@ retry_private:
         /* Unqueue and drop the lock */
         unqueue_me_pi(&q);
 
-        goto out;
+        goto out_put_key;
 
 out_unlock_put_key:
         queue_unlock(&q, hb);
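
The final hunk is a separate reference-count fix: get_futex_key() pins the key's backing object (the mm for private futexes, the inode for shared ones), and every exit from futex_lock_pi() must drop that pin via put_futex_key(). The old goto out bypassed the drop, leaking one reference per call on this path. Abridged label chain from futex_lock_pi() of this era (not verbatim):

        out_unlock_put_key:
                queue_unlock(&q, hb);
        out_put_key:
                put_futex_key(fshared, &q.key); /* undo get_futex_key() */
        out:
                ...
                return ret;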
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index c7ade62e4ef0..761fdd2b3034 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -599,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
         /* Signal the primary CPU that we are done: */
         atomic_set(&cpu_in_kgdb[cpu], 0);
-        touch_softlockup_watchdog();
+        touch_softlockup_watchdog_sync();
         clocksource_touch_watchdog();
         local_irq_restore(flags);
 }
@@ -1453,7 +1453,7 @@ acquirelock:
             (kgdb_info[cpu].task &&
              kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
                 atomic_set(&kgdb_active, -1);
-                touch_softlockup_watchdog();
+                touch_softlockup_watchdog_sync();
                 clocksource_touch_watchdog();
                 local_irq_restore(flags);
 
@@ -1553,7 +1553,7 @@ kgdb_restore:
         }
         /* Free kgdb_active */
         atomic_set(&kgdb_active, -1);
-        touch_softlockup_watchdog();
+        touch_softlockup_watchdog_sync();
         clocksource_touch_watchdog();
         local_irq_restore(flags);
 
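
All three kgdb call sites share one pattern: every CPU has been parked in the debugger with interrupts off, possibly for a long time, so the per-CPU sched clock has stopped while the hardware clock (e.g. the TSC) kept running. With the plain touch, resume could still trip the watchdog on CONFIG_HAVE_UNSTABLE_SCHED_CLOCK kernels; an illustrative timeline with made-up numbers:

        t = 100s  CPUs enter the debugger; IRQs off, the per-CPU sched
                  clock freezes at 100s while the TSC keeps counting
        t = 180s  resume; the plain touch would zero touch_ts
        tick 1    softlockup_tick() sees touch_ts == 0 and rearms from
                  the still-frozen clock: touch_ts = 100s
        tick 2    the sched clock has by now resynced to 180s; a delta
                  of 80s exceeds softlockup_thresh (60s) -> bogus
                  "BUG: soft lockup" report

The _sync() variant closes the window by forcing tick 1 to call sched_clock_tick() before rearming, so touch_ts starts at 180s rather than 100s.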
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..0d4c7898ab80 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+        __raw_get_cpu_var(softlock_touch_sync) = true;
+        __raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
         int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
         }
 
         if (touch_ts == 0) {
+                if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+                        /*
+                         * If the time stamp was touched atomically
+                         * make sure the scheduler tick is up to date.
+                         */
+                        per_cpu(softlock_touch_sync, this_cpu) = false;
+                        sched_clock_tick();
+                }
                 __touch_softlockup_watchdog();
                 return;
         }
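
Note that the new flag needs no atomics or locking: it is only set by code running on that CPU with interrupts off, and only cleared by the same CPU's own timer tick. A self-contained userspace mock of the handshake (invented names, one CPU, control flow only):

        #include <stdbool.h>
        #include <stdio.h>

        static unsigned long cpu_clock_now = 100; /* per-CPU clock, stale */
        static unsigned long hw_clock_now = 180;  /* what the hardware says */

        static unsigned long touch_ts;            /* 0 means "just touched" */
        static bool touch_sync;

        static void touch_watchdog_sync(void)     /* kgdb resume path */
        {
                touch_sync = true;
                touch_ts = 0;
        }

        static void watchdog_tick(void)           /* periodic timer tick */
        {
                if (touch_ts == 0) {
                        if (touch_sync) {
                                touch_sync = false;
                                cpu_clock_now = hw_clock_now; /* sched_clock_tick() */
                        }
                        touch_ts = cpu_clock_now; /* rearm from a fresh clock */
                        return;
                }
                if (cpu_clock_now - touch_ts > 60)
                        printf("BUG: soft lockup, delta=%lus\n",
                               cpu_clock_now - touch_ts);
        }

        int main(void)
        {
                touch_watchdog_sync();  /* what kgdb now calls on resume */
                watchdog_tick();        /* rearms at 180s, not the stale 100s */
                hw_clock_now = cpu_clock_now = 181;
                watchdog_tick();        /* delta is 1s: no spurious report */
                printf("touch_ts = %lus\n", touch_ts);
                return 0;
        }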
