Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/compat.c | 35 |
| -rw-r--r-- | kernel/cpu.c | 38 |
| -rw-r--r-- | kernel/delayacct.c | 15 |
| -rw-r--r-- | kernel/exit.c | 1 |
| -rw-r--r-- | kernel/fork.c | 7 |
| -rw-r--r-- | kernel/futex.c | 7 |
| -rw-r--r-- | kernel/irq/chip.c | 35 |
| -rw-r--r-- | kernel/irq/manage.c | 9 |
| -rw-r--r-- | kernel/irq/proc.c | 2 |
| -rw-r--r-- | kernel/irq/spurious.c | 6 |
| -rw-r--r-- | kernel/lockdep.c | 26 |
| -rw-r--r-- | kernel/module.c | 92 |
| -rw-r--r-- | kernel/mutex-debug.c | 2 |
| -rw-r--r-- | kernel/nsproxy.c | 6 |
| -rw-r--r-- | kernel/posix-cpu-timers.c | 27 |
| -rw-r--r-- | kernel/power/disk.c | 45 |
| -rw-r--r-- | kernel/power/swap.c | 3 |
| -rw-r--r-- | kernel/power/user.c | 8 |
| -rw-r--r-- | kernel/printk.c | 32 |
| -rw-r--r-- | kernel/profile.c | 2 |
| -rw-r--r-- | kernel/sched.c | 24 |
| -rw-r--r-- | kernel/signal.c | 15 |
| -rw-r--r-- | kernel/sys_ni.c | 2 |
| -rw-r--r-- | kernel/sysctl.c | 34 |
| -rw-r--r-- | kernel/taskstats.c | 87 |
| -rw-r--r-- | kernel/time/jiffies.c | 2 |
| -rw-r--r-- | kernel/time/ntp.c | 2 |
| -rw-r--r-- | kernel/tsacct.c | 17 |
| -rw-r--r-- | kernel/unwind.c | 318 |
| -rw-r--r-- | kernel/user.c | 11 |
| -rw-r--r-- | kernel/workqueue.c | 13 |
31 files changed, 663 insertions(+), 260 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 75573e5d27b0..6952dd057300 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
| @@ -678,7 +678,7 @@ int get_compat_sigevent(struct sigevent *event, | |||
| 678 | ? -EFAULT : 0; | 678 | ? -EFAULT : 0; |
| 679 | } | 679 | } |
| 680 | 680 | ||
| 681 | long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask, | 681 | long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, |
| 682 | unsigned long bitmap_size) | 682 | unsigned long bitmap_size) |
| 683 | { | 683 | { |
| 684 | int i, j; | 684 | int i, j; |
| @@ -982,4 +982,37 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages, | |||
| 982 | } | 982 | } |
| 983 | return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); | 983 | return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); |
| 984 | } | 984 | } |
| 985 | |||
| 986 | asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, | ||
| 987 | compat_ulong_t maxnode, | ||
| 988 | const compat_ulong_t __user *old_nodes, | ||
| 989 | const compat_ulong_t __user *new_nodes) | ||
| 990 | { | ||
| 991 | unsigned long __user *old = NULL; | ||
| 992 | unsigned long __user *new = NULL; | ||
| 993 | nodemask_t tmp_mask; | ||
| 994 | unsigned long nr_bits; | ||
| 995 | unsigned long size; | ||
| 996 | |||
| 997 | nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); | ||
| 998 | size = ALIGN(nr_bits, BITS_PER_LONG) / 8; | ||
| 999 | if (old_nodes) { | ||
| 1000 | if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) | ||
| 1001 | return -EFAULT; | ||
| 1002 | old = compat_alloc_user_space(new_nodes ? size * 2 : size); | ||
| 1003 | if (new_nodes) | ||
| 1004 | new = old + size / sizeof(unsigned long); | ||
| 1005 | if (copy_to_user(old, nodes_addr(tmp_mask), size)) | ||
| 1006 | return -EFAULT; | ||
| 1007 | } | ||
| 1008 | if (new_nodes) { | ||
| 1009 | if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) | ||
| 1010 | return -EFAULT; | ||
| 1011 | if (new == NULL) | ||
| 1012 | new = compat_alloc_user_space(size); | ||
| 1013 | if (copy_to_user(new, nodes_addr(tmp_mask), size)) | ||
| 1014 | return -EFAULT; | ||
| 1015 | } | ||
| 1016 | return sys_migrate_pages(pid, nr_bits + 1, old, new); | ||
| 1017 | } | ||
| 985 | #endif | 1018 | #endif |
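The new compat_sys_migrate_pages() follows the standard compat-bitmap round trip: compat_get_bitmap() decodes the 32-bit user layout into a kernel-side mask, and compat_alloc_user_space() supplies scratch user memory where the mask is re-written in native layout for the unchanged 64-bit syscall. A minimal sketch of the same round trip for a generic bitmap; my_native_syscall() and the 256-bit cap are hypothetical, the helpers are the real compat APIs used above:

```c
#include <linux/kernel.h>
#include <linux/compat.h>
#include <asm/uaccess.h>

asmlinkage long compat_sys_example(compat_ulong_t __user *ubits,
				   unsigned long nbits)
{
	DECLARE_BITMAP(tmp, 256);	/* kernel-side staging copy */
	unsigned long __user *native;
	unsigned long size;

	if (nbits > 256)
		return -EINVAL;
	size = ALIGN(nbits, BITS_PER_LONG) / 8;

	/* 32-bit user layout -> kernel unsigned longs */
	if (compat_get_bitmap(tmp, ubits, nbits))
		return -EFAULT;

	/* scratch area on the user stack, native layout */
	native = compat_alloc_user_space(size);
	if (copy_to_user(native, tmp, size))
		return -EFAULT;

	return my_native_syscall(native, nbits);	/* hypothetical */
}
```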
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 32c96628463e..272254f20d97 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
| @@ -19,7 +19,7 @@ | |||
| 19 | static DEFINE_MUTEX(cpu_add_remove_lock); | 19 | static DEFINE_MUTEX(cpu_add_remove_lock); |
| 20 | static DEFINE_MUTEX(cpu_bitmask_lock); | 20 | static DEFINE_MUTEX(cpu_bitmask_lock); |
| 21 | 21 | ||
| 22 | static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain); | 22 | static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain); |
| 23 | 23 | ||
| 24 | /* If set, cpu_up and cpu_down will return -EBUSY and do nothing. | 24 | /* If set, cpu_up and cpu_down will return -EBUSY and do nothing. |
| 25 | * Should always be manipulated under cpu_add_remove_lock | 25 | * Should always be manipulated under cpu_add_remove_lock |
| @@ -58,8 +58,8 @@ void unlock_cpu_hotplug(void) | |||
| 58 | recursive_depth--; | 58 | recursive_depth--; |
| 59 | return; | 59 | return; |
| 60 | } | 60 | } |
| 61 | mutex_unlock(&cpu_bitmask_lock); | ||
| 62 | recursive = NULL; | 61 | recursive = NULL; |
| 62 | mutex_unlock(&cpu_bitmask_lock); | ||
| 63 | } | 63 | } |
| 64 | EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); | 64 | EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); |
| 65 | 65 | ||
| @@ -68,7 +68,11 @@ EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); | |||
| 68 | /* Need to know about CPUs going up/down? */ | 68 | /* Need to know about CPUs going up/down? */ |
| 69 | int __cpuinit register_cpu_notifier(struct notifier_block *nb) | 69 | int __cpuinit register_cpu_notifier(struct notifier_block *nb) |
| 70 | { | 70 | { |
| 71 | return blocking_notifier_chain_register(&cpu_chain, nb); | 71 | int ret; |
| 72 | mutex_lock(&cpu_add_remove_lock); | ||
| 73 | ret = raw_notifier_chain_register(&cpu_chain, nb); | ||
| 74 | mutex_unlock(&cpu_add_remove_lock); | ||
| 75 | return ret; | ||
| 72 | } | 76 | } |
| 73 | 77 | ||
| 74 | #ifdef CONFIG_HOTPLUG_CPU | 78 | #ifdef CONFIG_HOTPLUG_CPU |
| @@ -77,7 +81,9 @@ EXPORT_SYMBOL(register_cpu_notifier); | |||
| 77 | 81 | ||
| 78 | void unregister_cpu_notifier(struct notifier_block *nb) | 82 | void unregister_cpu_notifier(struct notifier_block *nb) |
| 79 | { | 83 | { |
| 80 | blocking_notifier_chain_unregister(&cpu_chain, nb); | 84 | mutex_lock(&cpu_add_remove_lock); |
| 85 | raw_notifier_chain_unregister(&cpu_chain, nb); | ||
| 86 | mutex_unlock(&cpu_add_remove_lock); | ||
| 81 | } | 87 | } |
| 82 | EXPORT_SYMBOL(unregister_cpu_notifier); | 88 | EXPORT_SYMBOL(unregister_cpu_notifier); |
| 83 | 89 | ||
| @@ -126,7 +132,7 @@ static int _cpu_down(unsigned int cpu) | |||
| 126 | if (!cpu_online(cpu)) | 132 | if (!cpu_online(cpu)) |
| 127 | return -EINVAL; | 133 | return -EINVAL; |
| 128 | 134 | ||
| 129 | err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, | 135 | err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE, |
| 130 | (void *)(long)cpu); | 136 | (void *)(long)cpu); |
| 131 | if (err == NOTIFY_BAD) { | 137 | if (err == NOTIFY_BAD) { |
| 132 | printk("%s: attempt to take down CPU %u failed\n", | 138 | printk("%s: attempt to take down CPU %u failed\n", |
| @@ -144,18 +150,18 @@ static int _cpu_down(unsigned int cpu) | |||
| 144 | p = __stop_machine_run(take_cpu_down, NULL, cpu); | 150 | p = __stop_machine_run(take_cpu_down, NULL, cpu); |
| 145 | mutex_unlock(&cpu_bitmask_lock); | 151 | mutex_unlock(&cpu_bitmask_lock); |
| 146 | 152 | ||
| 147 | if (IS_ERR(p)) { | 153 | if (IS_ERR(p) || cpu_online(cpu)) { |
| 148 | /* CPU didn't die: tell everyone. Can't complain. */ | 154 | /* CPU didn't die: tell everyone. Can't complain. */ |
| 149 | if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, | 155 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, |
| 150 | (void *)(long)cpu) == NOTIFY_BAD) | 156 | (void *)(long)cpu) == NOTIFY_BAD) |
| 151 | BUG(); | 157 | BUG(); |
| 152 | 158 | ||
| 153 | err = PTR_ERR(p); | 159 | if (IS_ERR(p)) { |
| 154 | goto out_allowed; | 160 | err = PTR_ERR(p); |
| 155 | } | 161 | goto out_allowed; |
| 156 | 162 | } | |
| 157 | if (cpu_online(cpu)) | ||
| 158 | goto out_thread; | 163 | goto out_thread; |
| 164 | } | ||
| 159 | 165 | ||
| 160 | /* Wait for it to sleep (leaving idle task). */ | 166 | /* Wait for it to sleep (leaving idle task). */ |
| 161 | while (!idle_cpu(cpu)) | 167 | while (!idle_cpu(cpu)) |
| @@ -169,7 +175,7 @@ static int _cpu_down(unsigned int cpu) | |||
| 169 | put_cpu(); | 175 | put_cpu(); |
| 170 | 176 | ||
| 171 | /* CPU is completely dead: tell everyone. Too late to complain. */ | 177 | /* CPU is completely dead: tell everyone. Too late to complain. */ |
| 172 | if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD, | 178 | if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD, |
| 173 | (void *)(long)cpu) == NOTIFY_BAD) | 179 | (void *)(long)cpu) == NOTIFY_BAD) |
| 174 | BUG(); | 180 | BUG(); |
| 175 | 181 | ||
| @@ -206,7 +212,7 @@ static int __devinit _cpu_up(unsigned int cpu) | |||
| 206 | if (cpu_online(cpu) || !cpu_present(cpu)) | 212 | if (cpu_online(cpu) || !cpu_present(cpu)) |
| 207 | return -EINVAL; | 213 | return -EINVAL; |
| 208 | 214 | ||
| 209 | ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); | 215 | ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu); |
| 210 | if (ret == NOTIFY_BAD) { | 216 | if (ret == NOTIFY_BAD) { |
| 211 | printk("%s: attempt to bring up CPU %u failed\n", | 217 | printk("%s: attempt to bring up CPU %u failed\n", |
| 212 | __FUNCTION__, cpu); | 218 | __FUNCTION__, cpu); |
| @@ -223,11 +229,11 @@ static int __devinit _cpu_up(unsigned int cpu) | |||
| 223 | BUG_ON(!cpu_online(cpu)); | 229 | BUG_ON(!cpu_online(cpu)); |
| 224 | 230 | ||
| 225 | /* Now call notifier in preparation. */ | 231 | /* Now call notifier in preparation. */ |
| 226 | blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu); | 232 | raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu); |
| 227 | 233 | ||
| 228 | out_notify: | 234 | out_notify: |
| 229 | if (ret != 0) | 235 | if (ret != 0) |
| 230 | blocking_notifier_call_chain(&cpu_chain, | 236 | raw_notifier_call_chain(&cpu_chain, |
| 231 | CPU_UP_CANCELED, hcpu); | 237 | CPU_UP_CANCELED, hcpu); |
| 232 | 238 | ||
| 233 | return ret; | 239 | return ret; |
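Moving cpu_chain from a blocking notifier to a raw one drops the chain's internal rwsem; registration, unregistration, and every call now serialize on cpu_add_remove_lock instead, which is what makes the CPU_DOWN_FAILED/CPU_DEAD ordering reliable. The client-facing API is unchanged; a sketch of a typical (hypothetical) subscriber:

```c
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		/* allocate per-cpu state; NOTIFY_BAD would veto the bring-up */
		break;
	case CPU_ONLINE:
		printk(KERN_INFO "example: cpu %u online\n", cpu);
		break;
	case CPU_DEAD:
		/* cpu is gone: tear its state down */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	/* serialized against hotplug by cpu_add_remove_lock (see hunk above) */
	return register_cpu_notifier(&example_cpu_notifier);
}
```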
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 36752f124c6a..66a0ea48751d 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
| @@ -66,6 +66,7 @@ static void delayacct_end(struct timespec *start, struct timespec *end, | |||
| 66 | { | 66 | { |
| 67 | struct timespec ts; | 67 | struct timespec ts; |
| 68 | s64 ns; | 68 | s64 ns; |
| 69 | unsigned long flags; | ||
| 69 | 70 | ||
| 70 | do_posix_clock_monotonic_gettime(end); | 71 | do_posix_clock_monotonic_gettime(end); |
| 71 | ts = timespec_sub(*end, *start); | 72 | ts = timespec_sub(*end, *start); |
| @@ -73,10 +74,10 @@ static void delayacct_end(struct timespec *start, struct timespec *end, | |||
| 73 | if (ns < 0) | 74 | if (ns < 0) |
| 74 | return; | 75 | return; |
| 75 | 76 | ||
| 76 | spin_lock(¤t->delays->lock); | 77 | spin_lock_irqsave(¤t->delays->lock, flags); |
| 77 | *total += ns; | 78 | *total += ns; |
| 78 | (*count)++; | 79 | (*count)++; |
| 79 | spin_unlock(¤t->delays->lock); | 80 | spin_unlock_irqrestore(¤t->delays->lock, flags); |
| 80 | } | 81 | } |
| 81 | 82 | ||
| 82 | void __delayacct_blkio_start(void) | 83 | void __delayacct_blkio_start(void) |
| @@ -104,6 +105,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) | |||
| 104 | s64 tmp; | 105 | s64 tmp; |
| 105 | struct timespec ts; | 106 | struct timespec ts; |
| 106 | unsigned long t1,t2,t3; | 107 | unsigned long t1,t2,t3; |
| 108 | unsigned long flags; | ||
| 107 | 109 | ||
| 108 | /* Though tsk->delays accessed later, early exit avoids | 110 | /* Though tsk->delays accessed later, early exit avoids |
| 109 | * unnecessary returning of other data | 111 | * unnecessary returning of other data |
| @@ -136,14 +138,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) | |||
| 136 | 138 | ||
| 137 | /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ | 139 | /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ |
| 138 | 140 | ||
| 139 | spin_lock(&tsk->delays->lock); | 141 | spin_lock_irqsave(&tsk->delays->lock, flags); |
| 140 | tmp = d->blkio_delay_total + tsk->delays->blkio_delay; | 142 | tmp = d->blkio_delay_total + tsk->delays->blkio_delay; |
| 141 | d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; | 143 | d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; |
| 142 | tmp = d->swapin_delay_total + tsk->delays->swapin_delay; | 144 | tmp = d->swapin_delay_total + tsk->delays->swapin_delay; |
| 143 | d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; | 145 | d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; |
| 144 | d->blkio_count += tsk->delays->blkio_count; | 146 | d->blkio_count += tsk->delays->blkio_count; |
| 145 | d->swapin_count += tsk->delays->swapin_count; | 147 | d->swapin_count += tsk->delays->swapin_count; |
| 146 | spin_unlock(&tsk->delays->lock); | 148 | spin_unlock_irqrestore(&tsk->delays->lock, flags); |
| 147 | 149 | ||
| 148 | done: | 150 | done: |
| 149 | return 0; | 151 | return 0; |
| @@ -152,11 +154,12 @@ done: | |||
| 152 | __u64 __delayacct_blkio_ticks(struct task_struct *tsk) | 154 | __u64 __delayacct_blkio_ticks(struct task_struct *tsk) |
| 153 | { | 155 | { |
| 154 | __u64 ret; | 156 | __u64 ret; |
| 157 | unsigned long flags; | ||
| 155 | 158 | ||
| 156 | spin_lock(&tsk->delays->lock); | 159 | spin_lock_irqsave(&tsk->delays->lock, flags); |
| 157 | ret = nsec_to_clock_t(tsk->delays->blkio_delay + | 160 | ret = nsec_to_clock_t(tsk->delays->blkio_delay + |
| 158 | tsk->delays->swapin_delay); | 161 | tsk->delays->swapin_delay); |
| 159 | spin_unlock(&tsk->delays->lock); | 162 | spin_unlock_irqrestore(&tsk->delays->lock, flags); |
| 160 | return ret; | 163 | return ret; |
| 161 | } | 164 | } |
| 162 | 165 | ||
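delays->lock is now taken with interrupts disabled, so the accounting is safe even where an interrupt could otherwise re-enter the same lock; spin_lock_irqsave() also saves the caller's IRQ state, making the pattern correct whether interrupts were on or off on entry. The pattern in isolation, with stand-in state:

```c
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(stats_lock);	/* stand-in for delays->lock */
static u64 total_ns;
static u32 nr_samples;

/* Safe regardless of caller context: the current IRQ state is saved
 * in 'flags', interrupts stay off while the lock is held, and the
 * previous state is restored afterwards. */
static void stats_add(u64 ns)
{
	unsigned long flags;

	spin_lock_irqsave(&stats_lock, flags);
	total_ns += ns;
	nr_samples++;
	spin_unlock_irqrestore(&stats_lock, flags);
}
```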
diff --git a/kernel/exit.c b/kernel/exit.c
index f250a5e3e281..06de6c4e8ca3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
| @@ -128,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk) | |||
| 128 | flush_sigqueue(&tsk->pending); | 128 | flush_sigqueue(&tsk->pending); |
| 129 | if (sig) { | 129 | if (sig) { |
| 130 | flush_sigqueue(&sig->shared_pending); | 130 | flush_sigqueue(&sig->shared_pending); |
| 131 | taskstats_tgid_free(sig); | ||
| 131 | __cleanup_signal(sig); | 132 | __cleanup_signal(sig); |
| 132 | } | 133 | } |
| 133 | } | 134 | } |
diff --git a/kernel/fork.c b/kernel/fork.c
index 7dc6140baac6..3da978eec791 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
| @@ -830,7 +830,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts | |||
| 830 | if (clone_flags & CLONE_THREAD) { | 830 | if (clone_flags & CLONE_THREAD) { |
| 831 | atomic_inc(¤t->signal->count); | 831 | atomic_inc(¤t->signal->count); |
| 832 | atomic_inc(¤t->signal->live); | 832 | atomic_inc(¤t->signal->live); |
| 833 | taskstats_tgid_alloc(current->signal); | 833 | taskstats_tgid_alloc(current); |
| 834 | return 0; | 834 | return 0; |
| 835 | } | 835 | } |
| 836 | sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); | 836 | sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); |
| @@ -897,7 +897,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts | |||
| 897 | void __cleanup_signal(struct signal_struct *sig) | 897 | void __cleanup_signal(struct signal_struct *sig) |
| 898 | { | 898 | { |
| 899 | exit_thread_group_keys(sig); | 899 | exit_thread_group_keys(sig); |
| 900 | taskstats_tgid_free(sig); | ||
| 901 | kmem_cache_free(signal_cachep, sig); | 900 | kmem_cache_free(signal_cachep, sig); |
| 902 | } | 901 | } |
| 903 | 902 | ||
| @@ -984,6 +983,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 984 | if (!p) | 983 | if (!p) |
| 985 | goto fork_out; | 984 | goto fork_out; |
| 986 | 985 | ||
| 986 | rt_mutex_init_task(p); | ||
| 987 | |||
| 987 | #ifdef CONFIG_TRACE_IRQFLAGS | 988 | #ifdef CONFIG_TRACE_IRQFLAGS |
| 988 | DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); | 989 | DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled); |
| 989 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); | 990 | DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); |
| @@ -1088,8 +1089,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1088 | p->lockdep_recursion = 0; | 1089 | p->lockdep_recursion = 0; |
| 1089 | #endif | 1090 | #endif |
| 1090 | 1091 | ||
| 1091 | rt_mutex_init_task(p); | ||
| 1092 | |||
| 1093 | #ifdef CONFIG_DEBUG_MUTEXES | 1092 | #ifdef CONFIG_DEBUG_MUTEXES |
| 1094 | p->blocked_on = NULL; /* not blocked yet */ | 1093 | p->blocked_on = NULL; /* not blocked yet */ |
| 1095 | #endif | 1094 | #endif |
diff --git a/kernel/futex.c b/kernel/futex.c
index b364e0026191..93ef30ba209f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
| @@ -1507,6 +1507,13 @@ static int futex_fd(u32 __user *uaddr, int signal) | |||
| 1507 | struct futex_q *q; | 1507 | struct futex_q *q; |
| 1508 | struct file *filp; | 1508 | struct file *filp; |
| 1509 | int ret, err; | 1509 | int ret, err; |
| 1510 | static unsigned long printk_interval; | ||
| 1511 | |||
| 1512 | if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) { | ||
| 1513 | printk(KERN_WARNING "Process `%s' used FUTEX_FD, which " | ||
| 1514 | "will be removed from the kernel in June 2007\n", | ||
| 1515 | current->comm); | ||
| 1516 | } | ||
| 1510 | 1517 | ||
| 1511 | ret = -EINVAL; | 1518 | ret = -EINVAL; |
| 1512 | if (!valid_signal(signal)) | 1519 | if (!valid_signal(signal)) |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 11c99697acfe..ebfd24a41858 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
| @@ -233,6 +233,8 @@ void irq_chip_set_defaults(struct irq_chip *chip) | |||
| 233 | chip->shutdown = chip->disable; | 233 | chip->shutdown = chip->disable; |
| 234 | if (!chip->name) | 234 | if (!chip->name) |
| 235 | chip->name = chip->typename; | 235 | chip->name = chip->typename; |
| 236 | if (!chip->end) | ||
| 237 | chip->end = dummy_irq_chip.end; | ||
| 236 | } | 238 | } |
| 237 | 239 | ||
| 238 | static inline void mask_ack_irq(struct irq_desc *desc, int irq) | 240 | static inline void mask_ack_irq(struct irq_desc *desc, int irq) |
| @@ -499,7 +501,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc) | |||
| 499 | #endif /* CONFIG_SMP */ | 501 | #endif /* CONFIG_SMP */ |
| 500 | 502 | ||
| 501 | void | 503 | void |
| 502 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained) | 504 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
| 505 | const char *name) | ||
| 503 | { | 506 | { |
| 504 | struct irq_desc *desc; | 507 | struct irq_desc *desc; |
| 505 | unsigned long flags; | 508 | unsigned long flags; |
| @@ -540,6 +543,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained) | |||
| 540 | desc->depth = 1; | 543 | desc->depth = 1; |
| 541 | } | 544 | } |
| 542 | desc->handle_irq = handle; | 545 | desc->handle_irq = handle; |
| 546 | desc->name = name; | ||
| 543 | 547 | ||
| 544 | if (handle != handle_bad_irq && is_chained) { | 548 | if (handle != handle_bad_irq && is_chained) { |
| 545 | desc->status &= ~IRQ_DISABLED; | 549 | desc->status &= ~IRQ_DISABLED; |
| @@ -555,30 +559,13 @@ set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip, | |||
| 555 | irq_flow_handler_t handle) | 559 | irq_flow_handler_t handle) |
| 556 | { | 560 | { |
| 557 | set_irq_chip(irq, chip); | 561 | set_irq_chip(irq, chip); |
| 558 | __set_irq_handler(irq, handle, 0); | 562 | __set_irq_handler(irq, handle, 0, NULL); |
| 559 | } | 563 | } |
| 560 | 564 | ||
| 561 | /* | 565 | void |
| 562 | * Get a descriptive string for the highlevel handler, for | 566 | set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, |
| 563 | * /proc/interrupts output: | 567 | irq_flow_handler_t handle, const char *name) |
| 564 | */ | ||
| 565 | const char * | ||
| 566 | handle_irq_name(irq_flow_handler_t handle) | ||
| 567 | { | 568 | { |
| 568 | if (handle == handle_level_irq) | 569 | set_irq_chip(irq, chip); |
| 569 | return "level "; | 570 | __set_irq_handler(irq, handle, 0, name); |
| 570 | if (handle == handle_fasteoi_irq) | ||
| 571 | return "fasteoi"; | ||
| 572 | if (handle == handle_edge_irq) | ||
| 573 | return "edge "; | ||
| 574 | if (handle == handle_simple_irq) | ||
| 575 | return "simple "; | ||
| 576 | #ifdef CONFIG_SMP | ||
| 577 | if (handle == handle_percpu_irq) | ||
| 578 | return "percpu "; | ||
| 579 | #endif | ||
| 580 | if (handle == handle_bad_irq) | ||
| 581 | return "bad "; | ||
| 582 | |||
| 583 | return NULL; | ||
| 584 | } | 571 | } |
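With handle_irq_name() gone, the flow-handler name is attached at setup time and stored in desc->name, which /proc/interrupts prints alongside the chip name. Arch code that used set_irq_chip_and_handler() can switch to the *_name variant; a sketch with a hypothetical PIC chip:

```c
#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical chip; .ack/.mask/.unmask callbacks omitted in this sketch. */
static struct irq_chip example_pic_chip = {
	.name = "EX-PIC",
};

static void __init example_init_irqs(void)
{
	unsigned int irq;

	/* installs the chip, the flow handler, and the display name
	 * ("EX-PIC" / "level") that /proc/interrupts will show */
	for (irq = 0; irq < 16; irq++)
		set_irq_chip_and_handler_name(irq, &example_pic_chip,
					      handle_level_irq, "level");
}
```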
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6879202afe9a..b385878c6e80 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
| @@ -216,6 +216,7 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
| 216 | { | 216 | { |
| 217 | struct irq_desc *desc = irq_desc + irq; | 217 | struct irq_desc *desc = irq_desc + irq; |
| 218 | struct irqaction *old, **p; | 218 | struct irqaction *old, **p; |
| 219 | const char *old_name = NULL; | ||
| 219 | unsigned long flags; | 220 | unsigned long flags; |
| 220 | int shared = 0; | 221 | int shared = 0; |
| 221 | 222 | ||
| @@ -255,8 +256,10 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
| 255 | * set the trigger type must match. | 256 | * set the trigger type must match. |
| 256 | */ | 257 | */ |
| 257 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 258 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
| 258 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) | 259 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { |
| 260 | old_name = old->name; | ||
| 259 | goto mismatch; | 261 | goto mismatch; |
| 262 | } | ||
| 260 | 263 | ||
| 261 | #if defined(CONFIG_IRQ_PER_CPU) | 264 | #if defined(CONFIG_IRQ_PER_CPU) |
| 262 | /* All handlers must agree on per-cpuness */ | 265 | /* All handlers must agree on per-cpuness */ |
| @@ -322,11 +325,13 @@ int setup_irq(unsigned int irq, struct irqaction *new) | |||
| 322 | return 0; | 325 | return 0; |
| 323 | 326 | ||
| 324 | mismatch: | 327 | mismatch: |
| 325 | spin_unlock_irqrestore(&desc->lock, flags); | ||
| 326 | if (!(new->flags & IRQF_PROBE_SHARED)) { | 328 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
| 327 | printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); | 329 | printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); |
| 330 | if (old_name) | ||
| 331 | printk(KERN_ERR "current handler: %s\n", old_name); | ||
| 328 | dump_stack(); | 332 | dump_stack(); |
| 329 | } | 333 | } |
| 334 | spin_unlock_irqrestore(&desc->lock, flags); | ||
| 330 | return -EBUSY; | 335 | return -EBUSY; |
| 331 | } | 336 | } |
| 332 | 337 | ||
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 607c7809ad01..9a352667007c 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
| @@ -57,7 +57,7 @@ static int irq_affinity_write_proc(struct file *file, const char __user *buffer, | |||
| 57 | if (!irq_desc[irq].chip->set_affinity || no_irq_affinity) | 57 | if (!irq_desc[irq].chip->set_affinity || no_irq_affinity) |
| 58 | return -EIO; | 58 | return -EIO; |
| 59 | 59 | ||
| 60 | err = cpumask_parse(buffer, count, new_value); | 60 | err = cpumask_parse_user(buffer, count, new_value); |
| 61 | if (err) | 61 | if (err) |
| 62 | return err; | 62 | return err; |
| 63 | 63 | ||
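cpumask_parse() expected a kernel buffer, but these proc write handlers receive a __user pointer; cpumask_parse_user() performs the user-space copy itself (the profile.c hunk below makes the same switch). The corrected call shape, as a sketch:

```c
#include <linux/cpumask.h>
#include <linux/fs.h>

static int example_mask_write_proc(struct file *file,
				   const char __user *buffer,
				   unsigned long count, void *data)
{
	cpumask_t new_value;
	int err;

	/* parses the ASCII hex mask directly from user memory */
	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		return err;

	/* ... apply new_value ... */
	return count;
}
```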
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 543ea2e5ad93..9c7e2e4c1fe7 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
| @@ -147,7 +147,11 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc, | |||
| 147 | if (unlikely(irqfixup)) { | 147 | if (unlikely(irqfixup)) { |
| 148 | /* Don't punish working computers */ | 148 | /* Don't punish working computers */ |
| 149 | if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) { | 149 | if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) { |
| 150 | int ok = misrouted_irq(irq); | 150 | int ok; |
| 151 | |||
| 152 | spin_unlock(&desc->lock); | ||
| 153 | ok = misrouted_irq(irq); | ||
| 154 | spin_lock(&desc->lock); | ||
| 151 | if (action_ret == IRQ_NONE) | 155 | if (action_ret == IRQ_NONE) |
| 152 | desc->irqs_unhandled -= ok; | 156 | desc->irqs_unhandled -= ok; |
| 153 | } | 157 | } |
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4c0553461000..c9fefdb1a7db 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
| @@ -575,6 +575,8 @@ static noinline int print_circular_bug_tail(void) | |||
| 575 | return 0; | 575 | return 0; |
| 576 | } | 576 | } |
| 577 | 577 | ||
| 578 | #define RECURSION_LIMIT 40 | ||
| 579 | |||
| 578 | static int noinline print_infinite_recursion_bug(void) | 580 | static int noinline print_infinite_recursion_bug(void) |
| 579 | { | 581 | { |
| 580 | __raw_spin_unlock(&hash_lock); | 582 | __raw_spin_unlock(&hash_lock); |
| @@ -595,7 +597,7 @@ check_noncircular(struct lock_class *source, unsigned int depth) | |||
| 595 | debug_atomic_inc(&nr_cyclic_check_recursions); | 597 | debug_atomic_inc(&nr_cyclic_check_recursions); |
| 596 | if (depth > max_recursion_depth) | 598 | if (depth > max_recursion_depth) |
| 597 | max_recursion_depth = depth; | 599 | max_recursion_depth = depth; |
| 598 | if (depth >= 20) | 600 | if (depth >= RECURSION_LIMIT) |
| 599 | return print_infinite_recursion_bug(); | 601 | return print_infinite_recursion_bug(); |
| 600 | /* | 602 | /* |
| 601 | * Check this lock's dependency list: | 603 | * Check this lock's dependency list: |
| @@ -645,7 +647,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth) | |||
| 645 | 647 | ||
| 646 | if (depth > max_recursion_depth) | 648 | if (depth > max_recursion_depth) |
| 647 | max_recursion_depth = depth; | 649 | max_recursion_depth = depth; |
| 648 | if (depth >= 20) | 650 | if (depth >= RECURSION_LIMIT) |
| 649 | return print_infinite_recursion_bug(); | 651 | return print_infinite_recursion_bug(); |
| 650 | 652 | ||
| 651 | debug_atomic_inc(&nr_find_usage_forwards_checks); | 653 | debug_atomic_inc(&nr_find_usage_forwards_checks); |
| @@ -684,7 +686,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth) | |||
| 684 | 686 | ||
| 685 | if (depth > max_recursion_depth) | 687 | if (depth > max_recursion_depth) |
| 686 | max_recursion_depth = depth; | 688 | max_recursion_depth = depth; |
| 687 | if (depth >= 20) | 689 | if (depth >= RECURSION_LIMIT) |
| 688 | return print_infinite_recursion_bug(); | 690 | return print_infinite_recursion_bug(); |
| 689 | 691 | ||
| 690 | debug_atomic_inc(&nr_find_usage_backwards_checks); | 692 | debug_atomic_inc(&nr_find_usage_backwards_checks); |
| @@ -1079,7 +1081,8 @@ static int static_obj(void *obj) | |||
| 1079 | */ | 1081 | */ |
| 1080 | for_each_possible_cpu(i) { | 1082 | for_each_possible_cpu(i) { |
| 1081 | start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); | 1083 | start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); |
| 1082 | end = (unsigned long) &__per_cpu_end + per_cpu_offset(i); | 1084 | end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM |
| 1085 | + per_cpu_offset(i); | ||
| 1083 | 1086 | ||
| 1084 | if ((addr >= start) && (addr < end)) | 1087 | if ((addr >= start) && (addr < end)) |
| 1085 | return 1; | 1088 | return 1; |
| @@ -1114,8 +1117,6 @@ static int count_matching_names(struct lock_class *new_class) | |||
| 1114 | return count + 1; | 1117 | return count + 1; |
| 1115 | } | 1118 | } |
| 1116 | 1119 | ||
| 1117 | extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void); | ||
| 1118 | |||
| 1119 | /* | 1120 | /* |
| 1120 | * Register a lock's class in the hash-table, if the class is not present | 1121 | * Register a lock's class in the hash-table, if the class is not present |
| 1121 | * yet. Otherwise we look it up. We cache the result in the lock object | 1122 | * yet. Otherwise we look it up. We cache the result in the lock object |
| @@ -1153,8 +1154,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 1153 | * (or spin_lock_init()) call - which acts as the key. For static | 1154 | * (or spin_lock_init()) call - which acts as the key. For static |
| 1154 | * locks we use the lock object itself as the key. | 1155 | * locks we use the lock object itself as the key. |
| 1155 | */ | 1156 | */ |
| 1156 | if (sizeof(struct lock_class_key) > sizeof(struct lock_class)) | 1157 | BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class)); |
| 1157 | __error_too_big_MAX_LOCKDEP_SUBCLASSES(); | ||
| 1158 | 1158 | ||
| 1159 | key = lock->key->subkeys + subclass; | 1159 | key = lock->key->subkeys + subclass; |
| 1160 | 1160 | ||
| @@ -1177,7 +1177,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 1177 | * itself, so actual lookup of the hash should be once per lock object. | 1177 | * itself, so actual lookup of the hash should be once per lock object. |
| 1178 | */ | 1178 | */ |
| 1179 | static inline struct lock_class * | 1179 | static inline struct lock_class * |
| 1180 | register_lock_class(struct lockdep_map *lock, unsigned int subclass) | 1180 | register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) |
| 1181 | { | 1181 | { |
| 1182 | struct lockdep_subclass_key *key; | 1182 | struct lockdep_subclass_key *key; |
| 1183 | struct list_head *hash_head; | 1183 | struct list_head *hash_head; |
| @@ -1249,7 +1249,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 1249 | out_unlock_set: | 1249 | out_unlock_set: |
| 1250 | __raw_spin_unlock(&hash_lock); | 1250 | __raw_spin_unlock(&hash_lock); |
| 1251 | 1251 | ||
| 1252 | if (!subclass) | 1252 | if (!subclass || force) |
| 1253 | lock->class_cache = class; | 1253 | lock->class_cache = class; |
| 1254 | 1254 | ||
| 1255 | DEBUG_LOCKS_WARN_ON(class->subclass != subclass); | 1255 | DEBUG_LOCKS_WARN_ON(class->subclass != subclass); |
| @@ -1937,7 +1937,7 @@ void trace_softirqs_off(unsigned long ip) | |||
| 1937 | * Initialize a lock instance's lock-class mapping info: | 1937 | * Initialize a lock instance's lock-class mapping info: |
| 1938 | */ | 1938 | */ |
| 1939 | void lockdep_init_map(struct lockdep_map *lock, const char *name, | 1939 | void lockdep_init_map(struct lockdep_map *lock, const char *name, |
| 1940 | struct lock_class_key *key) | 1940 | struct lock_class_key *key, int subclass) |
| 1941 | { | 1941 | { |
| 1942 | if (unlikely(!debug_locks)) | 1942 | if (unlikely(!debug_locks)) |
| 1943 | return; | 1943 | return; |
| @@ -1957,6 +1957,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
| 1957 | lock->name = name; | 1957 | lock->name = name; |
| 1958 | lock->key = key; | 1958 | lock->key = key; |
| 1959 | lock->class_cache = NULL; | 1959 | lock->class_cache = NULL; |
| 1960 | if (subclass) | ||
| 1961 | register_lock_class(lock, subclass, 1); | ||
| 1960 | } | 1962 | } |
| 1961 | 1963 | ||
| 1962 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 1964 | EXPORT_SYMBOL_GPL(lockdep_init_map); |
| @@ -1995,7 +1997,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 1995 | * Not cached yet or subclass? | 1997 | * Not cached yet or subclass? |
| 1996 | */ | 1998 | */ |
| 1997 | if (unlikely(!class)) { | 1999 | if (unlikely(!class)) { |
| 1998 | class = register_lock_class(lock, subclass); | 2000 | class = register_lock_class(lock, subclass, 0); |
| 1999 | if (!class) | 2001 | if (!class) |
| 2000 | return 0; | 2002 | return 0; |
| 2001 | } | 2003 | } |
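The lockdep hunk also replaces the old link-time assertion trick (calling an undefined extern function under a condition the compiler should prove false) with BUILD_BUG_ON(), which fails at compile time and needs no dummy symbol. The two styles side by side, with hypothetical structs:

```c
#include <linux/kernel.h>

struct small_key { unsigned long k; };
struct big_slot  { unsigned long s[4]; };

/* Old style: if the condition were true, the call would survive
 * dead-code elimination and the final link would fail with an
 * undefined reference to this symbol. */
extern void __error_key_does_not_fit(void);

static void check_fit_old(void)
{
	if (sizeof(struct small_key) > sizeof(struct big_slot))
		__error_key_does_not_fit();
}

static void check_fit_new(void)
{
	/* New style: expands to a negative-sized array when the
	 * condition is true, so compilation stops right here. */
	BUILD_BUG_ON(sizeof(struct small_key) > sizeof(struct big_slot));
}
```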
diff --git a/kernel/module.c b/kernel/module.c
index 7f60e782de1e..f0166563c602 100644
--- a/kernel/module.c
+++ b/kernel/module.c
| @@ -87,6 +87,12 @@ static inline int strong_try_module_get(struct module *mod) | |||
| 87 | return try_module_get(mod); | 87 | return try_module_get(mod); |
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | static inline void add_taint_module(struct module *mod, unsigned flag) | ||
| 91 | { | ||
| 92 | add_taint(flag); | ||
| 93 | mod->taints |= flag; | ||
| 94 | } | ||
| 95 | |||
| 90 | /* A thread that wants to hold a reference to a module only while it | 96 | /* A thread that wants to hold a reference to a module only while it |
| 91 | * is running can call this to safely exit. | 97 |
| 92 | * nfsd and lockd use this. | 98 | * nfsd and lockd use this. |
| @@ -847,12 +853,10 @@ static int check_version(Elf_Shdr *sechdrs, | |||
| 847 | return 0; | 853 | return 0; |
| 848 | } | 854 | } |
| 849 | /* Not in module's version table. OK, but that taints the kernel. */ | 855 | /* Not in module's version table. OK, but that taints the kernel. */ |
| 850 | if (!(tainted & TAINT_FORCED_MODULE)) { | 856 | if (!(tainted & TAINT_FORCED_MODULE)) |
| 851 | printk("%s: no version for \"%s\" found: kernel tainted.\n", | 857 | printk("%s: no version for \"%s\" found: kernel tainted.\n", |
| 852 | mod->name, symname); | 858 | mod->name, symname); |
| 853 | add_taint(TAINT_FORCED_MODULE); | 859 | add_taint_module(mod, TAINT_FORCED_MODULE); |
| 854 | mod->taints |= TAINT_FORCED_MODULE; | ||
| 855 | } | ||
| 856 | return 1; | 860 | return 1; |
| 857 | } | 861 | } |
| 858 | 862 | ||
| @@ -910,7 +914,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs, | |||
| 910 | unsigned long ret; | 914 | unsigned long ret; |
| 911 | const unsigned long *crc; | 915 | const unsigned long *crc; |
| 912 | 916 | ||
| 913 | ret = __find_symbol(name, &owner, &crc, mod->license_gplok); | 917 | ret = __find_symbol(name, &owner, &crc, |
| 918 | !(mod->taints & TAINT_PROPRIETARY_MODULE)); | ||
| 914 | if (ret) { | 919 | if (ret) { |
| 915 | /* use_module can fail due to OOM, or module unloading */ | 920 | /* use_module can fail due to OOM, or module unloading */ |
| 916 | if (!check_version(sechdrs, versindex, name, mod, crc) || | 921 | if (!check_version(sechdrs, versindex, name, mod, crc) || |
| @@ -1335,12 +1340,11 @@ static void set_license(struct module *mod, const char *license) | |||
| 1335 | if (!license) | 1340 | if (!license) |
| 1336 | license = "unspecified"; | 1341 | license = "unspecified"; |
| 1337 | 1342 | ||
| 1338 | mod->license_gplok = license_is_gpl_compatible(license); | 1343 | if (!license_is_gpl_compatible(license)) { |
| 1339 | if (!mod->license_gplok && !(tainted & TAINT_PROPRIETARY_MODULE)) { | 1344 | if (!(tainted & TAINT_PROPRIETARY_MODULE)) |
| 1340 | printk(KERN_WARNING "%s: module license '%s' taints kernel.\n", | 1345 | printk(KERN_WARNING "%s: module license '%s' taints " |
| 1341 | mod->name, license); | 1346 | "kernel.\n", mod->name, license); |
| 1342 | add_taint(TAINT_PROPRIETARY_MODULE); | 1347 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE); |
| 1343 | mod->taints |= TAINT_PROPRIETARY_MODULE; | ||
| 1344 | } | 1348 | } |
| 1345 | } | 1349 | } |
| 1346 | 1350 | ||
| @@ -1619,8 +1623,7 @@ static struct module *load_module(void __user *umod, | |||
| 1619 | modmagic = get_modinfo(sechdrs, infoindex, "vermagic"); | 1623 | modmagic = get_modinfo(sechdrs, infoindex, "vermagic"); |
| 1620 | /* This is allowed: modprobe --force will invalidate it. */ | 1624 | /* This is allowed: modprobe --force will invalidate it. */ |
| 1621 | if (!modmagic) { | 1625 | if (!modmagic) { |
| 1622 | add_taint(TAINT_FORCED_MODULE); | 1626 | add_taint_module(mod, TAINT_FORCED_MODULE); |
| 1623 | mod->taints |= TAINT_FORCED_MODULE; | ||
| 1624 | printk(KERN_WARNING "%s: no version magic, tainting kernel.\n", | 1627 | printk(KERN_WARNING "%s: no version magic, tainting kernel.\n", |
| 1625 | mod->name); | 1628 | mod->name); |
| 1626 | } else if (!same_magic(modmagic, vermagic)) { | 1629 | } else if (!same_magic(modmagic, vermagic)) { |
| @@ -1714,14 +1717,10 @@ static struct module *load_module(void __user *umod, | |||
| 1714 | /* Set up license info based on the info section */ | 1717 | /* Set up license info based on the info section */ |
| 1715 | set_license(mod, get_modinfo(sechdrs, infoindex, "license")); | 1718 | set_license(mod, get_modinfo(sechdrs, infoindex, "license")); |
| 1716 | 1719 | ||
| 1717 | if (strcmp(mod->name, "ndiswrapper") == 0) { | 1720 | if (strcmp(mod->name, "ndiswrapper") == 0) |
| 1718 | add_taint(TAINT_PROPRIETARY_MODULE); | ||
| 1719 | mod->taints |= TAINT_PROPRIETARY_MODULE; | ||
| 1720 | } | ||
| 1721 | if (strcmp(mod->name, "driverloader") == 0) { | ||
| 1722 | add_taint(TAINT_PROPRIETARY_MODULE); | 1721 | add_taint(TAINT_PROPRIETARY_MODULE); |
| 1723 | mod->taints |= TAINT_PROPRIETARY_MODULE; | 1722 | if (strcmp(mod->name, "driverloader") == 0) |
| 1724 | } | 1723 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE); |
| 1725 | 1724 | ||
| 1726 | /* Set up MODINFO_ATTR fields */ | 1725 | /* Set up MODINFO_ATTR fields */ |
| 1727 | setup_modinfo(mod, sechdrs, infoindex); | 1726 | setup_modinfo(mod, sechdrs, infoindex); |
| @@ -1766,8 +1765,7 @@ static struct module *load_module(void __user *umod, | |||
| 1766 | (mod->num_unused_gpl_syms && !unusedgplcrcindex)) { | 1765 | (mod->num_unused_gpl_syms && !unusedgplcrcindex)) { |
| 1767 | printk(KERN_WARNING "%s: No versions for exported symbols." | 1766 | printk(KERN_WARNING "%s: No versions for exported symbols." |
| 1768 | " Tainting kernel.\n", mod->name); | 1767 | " Tainting kernel.\n", mod->name); |
| 1769 | add_taint(TAINT_FORCED_MODULE); | 1768 | add_taint_module(mod, TAINT_FORCED_MODULE); |
| 1770 | mod->taints |= TAINT_FORCED_MODULE; | ||
| 1771 | } | 1769 | } |
| 1772 | #endif | 1770 | #endif |
| 1773 | 1771 | ||
| @@ -2132,9 +2130,33 @@ static void m_stop(struct seq_file *m, void *p) | |||
| 2132 | mutex_unlock(&module_mutex); | 2130 | mutex_unlock(&module_mutex); |
| 2133 | } | 2131 | } |
| 2134 | 2132 | ||
| 2133 | static char *taint_flags(unsigned int taints, char *buf) | ||
| 2134 | { | ||
| 2135 | int bx = 0; | ||
| 2136 | |||
| 2137 | if (taints) { | ||
| 2138 | buf[bx++] = '('; | ||
| 2139 | if (taints & TAINT_PROPRIETARY_MODULE) | ||
| 2140 | buf[bx++] = 'P'; | ||
| 2141 | if (taints & TAINT_FORCED_MODULE) | ||
| 2142 | buf[bx++] = 'F'; | ||
| 2143 | /* | ||
| 2144 | * TAINT_FORCED_RMMOD: could be added. | ||
| 2145 | * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't | ||
| 2146 | * apply to modules. | ||
| 2147 | */ | ||
| 2148 | buf[bx++] = ')'; | ||
| 2149 | } | ||
| 2150 | buf[bx] = '\0'; | ||
| 2151 | |||
| 2152 | return buf; | ||
| 2153 | } | ||
| 2154 | |||
| 2135 | static int m_show(struct seq_file *m, void *p) | 2155 | static int m_show(struct seq_file *m, void *p) |
| 2136 | { | 2156 | { |
| 2137 | struct module *mod = list_entry(p, struct module, list); | 2157 | struct module *mod = list_entry(p, struct module, list); |
| 2158 | char buf[8]; | ||
| 2159 | |||
| 2138 | seq_printf(m, "%s %lu", | 2160 | seq_printf(m, "%s %lu", |
| 2139 | mod->name, mod->init_size + mod->core_size); | 2161 | mod->name, mod->init_size + mod->core_size); |
| 2140 | print_unload_info(m, mod); | 2162 | print_unload_info(m, mod); |
| @@ -2147,6 +2169,10 @@ static int m_show(struct seq_file *m, void *p) | |||
| 2147 | /* Used by oprofile and other similar tools. */ | 2169 | /* Used by oprofile and other similar tools. */ |
| 2148 | seq_printf(m, " 0x%p", mod->module_core); | 2170 | seq_printf(m, " 0x%p", mod->module_core); |
| 2149 | 2171 | ||
| 2172 | /* Taints info */ | ||
| 2173 | if (mod->taints) | ||
| 2174 | seq_printf(m, " %s", taint_flags(mod->taints, buf)); | ||
| 2175 | |||
| 2150 | seq_printf(m, "\n"); | 2176 | seq_printf(m, "\n"); |
| 2151 | return 0; | 2177 | return 0; |
| 2152 | } | 2178 | } |
| @@ -2235,28 +2261,6 @@ struct module *module_text_address(unsigned long addr) | |||
| 2235 | return mod; | 2261 | return mod; |
| 2236 | } | 2262 | } |
| 2237 | 2263 | ||
| 2238 | static char *taint_flags(unsigned int taints, char *buf) | ||
| 2239 | { | ||
| 2240 | *buf = '\0'; | ||
| 2241 | if (taints) { | ||
| 2242 | int bx; | ||
| 2243 | |||
| 2244 | buf[0] = '('; | ||
| 2245 | bx = 1; | ||
| 2246 | if (taints & TAINT_PROPRIETARY_MODULE) | ||
| 2247 | buf[bx++] = 'P'; | ||
| 2248 | if (taints & TAINT_FORCED_MODULE) | ||
| 2249 | buf[bx++] = 'F'; | ||
| 2250 | /* | ||
| 2251 | * TAINT_FORCED_RMMOD: could be added. | ||
| 2252 | * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't | ||
| 2253 | * apply to modules. | ||
| 2254 | */ | ||
| 2255 | buf[bx] = ')'; | ||
| 2256 | } | ||
| 2257 | return buf; | ||
| 2258 | } | ||
| 2259 | |||
| 2260 | /* Don't grab lock, we're oopsing. */ | 2264 | /* Don't grab lock, we're oopsing. */ |
| 2261 | void print_modules(void) | 2265 | void print_modules(void) |
| 2262 | { | 2266 | { |
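taint_flags() moves above m_show() so /proc/modules can grow a per-module taint column; a force-loaded proprietary module now shows a trailing "(PF)" instead of only flipping the global tainted word. A hedged sketch of the output path (it assumes taint_flags() is visible at the call site, as it now is inside module.c):

```c
#include <linux/module.h>
#include <linux/seq_file.h>

static void example_show_taint_column(struct seq_file *m, struct module *mod)
{
	char buf[8];	/* worst case today is "(PF)" plus the NUL */

	/* e.g. "example 12288 0 - Live 0xf8a00000 (PF)" */
	if (mod->taints)
		seq_printf(m, " %s", taint_flags(mod->taints, buf));
}
```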
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index e3203c654dda..18651641a7b5 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
| @@ -91,7 +91,7 @@ void debug_mutex_init(struct mutex *lock, const char *name, | |||
| 91 | * Make sure we are not reinitializing a held lock: | 91 | * Make sure we are not reinitializing a held lock: |
| 92 | */ | 92 | */ |
| 93 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); | 93 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); |
| 94 | lockdep_init_map(&lock->dep_map, name, key); | 94 | lockdep_init_map(&lock->dep_map, name, key, 0); |
| 95 | #endif | 95 | #endif |
| 96 | lock->owner = NULL; | 96 | lock->owner = NULL; |
| 97 | lock->magic = lock; | 97 | lock->magic = lock; |
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 6ebdb82a0ce4..674aceb7335a 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
| @@ -44,11 +44,9 @@ static inline struct nsproxy *clone_namespaces(struct nsproxy *orig) | |||
| 44 | { | 44 | { |
| 45 | struct nsproxy *ns; | 45 | struct nsproxy *ns; |
| 46 | 46 | ||
| 47 | ns = kmalloc(sizeof(struct nsproxy), GFP_KERNEL); | 47 | ns = kmemdup(orig, sizeof(struct nsproxy), GFP_KERNEL); |
| 48 | if (ns) { | 48 | if (ns) |
| 49 | memcpy(ns, orig, sizeof(struct nsproxy)); | ||
| 50 | atomic_set(&ns->count, 1); | 49 | atomic_set(&ns->count, 1); |
| 51 | } | ||
| 52 | return ns; | 50 | return ns; |
| 53 | } | 51 | } |
| 54 | 52 | ||
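kmemdup() folds the kmalloc()+memcpy() pair into one call with the same GFP semantics and NULL-on-failure contract, so only the refcount reset remains in the caller. The pattern in isolation, with a hypothetical struct:

```c
#include <linux/slab.h>
#include <linux/string.h>

struct example_cfg {
	int width;
	int height;
};

/* Clone an object in one call; returns NULL on allocation failure,
 * exactly like the kmalloc()+memcpy() sequence it replaces. */
static struct example_cfg *example_cfg_clone(const struct example_cfg *orig)
{
	return kmemdup(orig, sizeof(*orig), GFP_KERNEL);
}
```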
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 479b16b44f79..7c3e1e6dfb5b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
| @@ -88,6 +88,19 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock, | |||
| 88 | } | 88 | } |
| 89 | 89 | ||
| 90 | /* | 90 | /* |
| 91 | * Divide and limit the result to res >= 1 | ||
| 92 | * | ||
| 93 | * This is necessary to prevent signal delivery starvation, when the result of | ||
| 94 | * the division would be rounded down to 0. | ||
| 95 | */ | ||
| 96 | static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div) | ||
| 97 | { | ||
| 98 | cputime_t res = cputime_div(time, div); | ||
| 99 | |||
| 100 | return max_t(cputime_t, res, 1); | ||
| 101 | } | ||
| 102 | |||
| 103 | /* | ||
| 91 | * Update expiry time from increment, and increase overrun count, | 104 | * Update expiry time from increment, and increase overrun count, |
| 92 | * given the current clock sample. | 105 | * given the current clock sample. |
| 93 | */ | 106 | */ |
| @@ -483,8 +496,8 @@ static void process_timer_rebalance(struct task_struct *p, | |||
| 483 | BUG(); | 496 | BUG(); |
| 484 | break; | 497 | break; |
| 485 | case CPUCLOCK_PROF: | 498 | case CPUCLOCK_PROF: |
| 486 | left = cputime_div(cputime_sub(expires.cpu, val.cpu), | 499 | left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), |
| 487 | nthreads); | 500 | nthreads); |
| 488 | do { | 501 | do { |
| 489 | if (likely(!(t->flags & PF_EXITING))) { | 502 | if (likely(!(t->flags & PF_EXITING))) { |
| 490 | ticks = cputime_add(prof_ticks(t), left); | 503 | ticks = cputime_add(prof_ticks(t), left); |
| @@ -498,8 +511,8 @@ static void process_timer_rebalance(struct task_struct *p, | |||
| 498 | } while (t != p); | 511 | } while (t != p); |
| 499 | break; | 512 | break; |
| 500 | case CPUCLOCK_VIRT: | 513 | case CPUCLOCK_VIRT: |
| 501 | left = cputime_div(cputime_sub(expires.cpu, val.cpu), | 514 | left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu), |
| 502 | nthreads); | 515 | nthreads); |
| 503 | do { | 516 | do { |
| 504 | if (likely(!(t->flags & PF_EXITING))) { | 517 | if (likely(!(t->flags & PF_EXITING))) { |
| 505 | ticks = cputime_add(virt_ticks(t), left); | 518 | ticks = cputime_add(virt_ticks(t), left); |
| @@ -515,6 +528,7 @@ static void process_timer_rebalance(struct task_struct *p, | |||
| 515 | case CPUCLOCK_SCHED: | 528 | case CPUCLOCK_SCHED: |
| 516 | nsleft = expires.sched - val.sched; | 529 | nsleft = expires.sched - val.sched; |
| 517 | do_div(nsleft, nthreads); | 530 | do_div(nsleft, nthreads); |
| 531 | nsleft = max_t(unsigned long long, nsleft, 1); | ||
| 518 | do { | 532 | do { |
| 519 | if (likely(!(t->flags & PF_EXITING))) { | 533 | if (likely(!(t->flags & PF_EXITING))) { |
| 520 | ns = t->sched_time + nsleft; | 534 | ns = t->sched_time + nsleft; |
| @@ -1159,12 +1173,13 @@ static void check_process_timers(struct task_struct *tsk, | |||
| 1159 | 1173 | ||
| 1160 | prof_left = cputime_sub(prof_expires, utime); | 1174 | prof_left = cputime_sub(prof_expires, utime); |
| 1161 | prof_left = cputime_sub(prof_left, stime); | 1175 | prof_left = cputime_sub(prof_left, stime); |
| 1162 | prof_left = cputime_div(prof_left, nthreads); | 1176 | prof_left = cputime_div_non_zero(prof_left, nthreads); |
| 1163 | virt_left = cputime_sub(virt_expires, utime); | 1177 | virt_left = cputime_sub(virt_expires, utime); |
| 1164 | virt_left = cputime_div(virt_left, nthreads); | 1178 | virt_left = cputime_div_non_zero(virt_left, nthreads); |
| 1165 | if (sched_expires) { | 1179 | if (sched_expires) { |
| 1166 | sched_left = sched_expires - sched_time; | 1180 | sched_left = sched_expires - sched_time; |
| 1167 | do_div(sched_left, nthreads); | 1181 | do_div(sched_left, nthreads); |
| 1182 | sched_left = max_t(unsigned long long, sched_left, 1); | ||
| 1168 | } else { | 1183 | } else { |
| 1169 | sched_left = 0; | 1184 | sched_left = 0; |
| 1170 | } | 1185 | } |
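The clamp matters because each thread's new expiry is offset by remaining/nthreads; once that integer quotient reaches 0, every expiry is set to the current time and the process-wide timer can re-fire without ever advancing, starving signal delivery. The guard reduced to its essence, as a hypothetical helper:

```c
/* Hypothetical illustration of the rounding hazard fixed above. */
static inline unsigned long share_non_zero(unsigned long remaining,
					   unsigned long nthreads)
{
	unsigned long share = remaining / nthreads;

	/* e.g. 999 ticks over 1000 threads divides to 0; clamp to 1 so
	 * each thread's expiry still moves at least one tick forward */
	return share ? share : 1;
}
```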
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d72234942798..b1fb7866b0b3 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
| 19 | #include <linux/mount.h> | 19 | #include <linux/mount.h> |
| 20 | #include <linux/pm.h> | 20 | #include <linux/pm.h> |
| 21 | #include <linux/console.h> | ||
| 21 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
| 22 | 23 | ||
| 23 | #include "power.h" | 24 | #include "power.h" |
| @@ -70,7 +71,7 @@ static inline void platform_finish(void) | |||
| 70 | 71 | ||
| 71 | static int prepare_processes(void) | 72 | static int prepare_processes(void) |
| 72 | { | 73 | { |
| 73 | int error; | 74 | int error = 0; |
| 74 | 75 | ||
| 75 | pm_prepare_console(); | 76 | pm_prepare_console(); |
| 76 | 77 | ||
| @@ -83,6 +84,12 @@ static int prepare_processes(void) | |||
| 83 | goto thaw; | 84 | goto thaw; |
| 84 | } | 85 | } |
| 85 | 86 | ||
| 87 | if (pm_disk_mode == PM_DISK_TESTPROC) { | ||
| 88 | printk("swsusp debug: Waiting for 5 seconds.\n"); | ||
| 89 | mdelay(5000); | ||
| 90 | goto thaw; | ||
| 91 | } | ||
| 92 | |||
| 86 | /* Free memory before shutting down devices. */ | 93 | /* Free memory before shutting down devices. */ |
| 87 | if (!(error = swsusp_shrink_memory())) | 94 | if (!(error = swsusp_shrink_memory())) |
| 88 | return 0; | 95 | return 0; |
| @@ -119,11 +126,21 @@ int pm_suspend_disk(void) | |||
| 119 | if (error) | 126 | if (error) |
| 120 | return error; | 127 | return error; |
| 121 | 128 | ||
| 129 | if (pm_disk_mode == PM_DISK_TESTPROC) | ||
| 130 | goto Thaw; | ||
| 131 | |||
| 132 | suspend_console(); | ||
| 122 | error = device_suspend(PMSG_FREEZE); | 133 | error = device_suspend(PMSG_FREEZE); |
| 123 | if (error) { | 134 | if (error) { |
| 135 | resume_console(); | ||
| 124 | printk("Some devices failed to suspend\n"); | 136 | printk("Some devices failed to suspend\n"); |
| 125 | unprepare_processes(); | 137 | goto Thaw; |
| 126 | return error; | 138 | } |
| 139 | |||
| 140 | if (pm_disk_mode == PM_DISK_TEST) { | ||
| 141 | printk("swsusp debug: Waiting for 5 seconds.\n"); | ||
| 142 | mdelay(5000); | ||
| 143 | goto Done; | ||
| 127 | } | 144 | } |
| 128 | 145 | ||
| 129 | pr_debug("PM: snapshotting memory.\n"); | 146 | pr_debug("PM: snapshotting memory.\n"); |
| @@ -133,21 +150,24 @@ int pm_suspend_disk(void) | |||
| 133 | 150 | ||
| 134 | if (in_suspend) { | 151 | if (in_suspend) { |
| 135 | device_resume(); | 152 | device_resume(); |
| 153 | resume_console(); | ||
| 136 | pr_debug("PM: writing image.\n"); | 154 | pr_debug("PM: writing image.\n"); |
| 137 | error = swsusp_write(); | 155 | error = swsusp_write(); |
| 138 | if (!error) | 156 | if (!error) |
| 139 | power_down(pm_disk_mode); | 157 | power_down(pm_disk_mode); |
| 140 | else { | 158 | else { |
| 141 | swsusp_free(); | 159 | swsusp_free(); |
| 142 | unprepare_processes(); | 160 | goto Thaw; |
| 143 | return error; | ||
| 144 | } | 161 | } |
| 145 | } else | 162 | } else { |
| 146 | pr_debug("PM: Image restored successfully.\n"); | 163 | pr_debug("PM: Image restored successfully.\n"); |
| 164 | } | ||
| 147 | 165 | ||
| 148 | swsusp_free(); | 166 | swsusp_free(); |
| 149 | Done: | 167 | Done: |
| 150 | device_resume(); | 168 | device_resume(); |
| 169 | resume_console(); | ||
| 170 | Thaw: | ||
| 151 | unprepare_processes(); | 171 | unprepare_processes(); |
| 152 | return error; | 172 | return error; |
| 153 | } | 173 | } |
| @@ -212,7 +232,9 @@ static int software_resume(void) | |||
| 212 | 232 | ||
| 213 | pr_debug("PM: Preparing devices for restore.\n"); | 233 | pr_debug("PM: Preparing devices for restore.\n"); |
| 214 | 234 | ||
| 235 | suspend_console(); | ||
| 215 | if ((error = device_suspend(PMSG_PRETHAW))) { | 236 | if ((error = device_suspend(PMSG_PRETHAW))) { |
| 237 | resume_console(); | ||
| 216 | printk("Some devices failed to suspend\n"); | 238 | printk("Some devices failed to suspend\n"); |
| 217 | swsusp_free(); | 239 | swsusp_free(); |
| 218 | goto Thaw; | 240 | goto Thaw; |
| @@ -224,6 +246,7 @@ static int software_resume(void) | |||
| 224 | swsusp_resume(); | 246 | swsusp_resume(); |
| 225 | pr_debug("PM: Restore failed, recovering.\n"); | 247 | pr_debug("PM: Restore failed, recovering.\n"); |
| 226 | device_resume(); | 248 | device_resume(); |
| 249 | resume_console(); | ||
| 227 | Thaw: | 250 | Thaw: |
| 228 | unprepare_processes(); | 251 | unprepare_processes(); |
| 229 | Done: | 252 | Done: |
| @@ -241,6 +264,8 @@ static const char * const pm_disk_modes[] = { | |||
| 241 | [PM_DISK_PLATFORM] = "platform", | 264 | [PM_DISK_PLATFORM] = "platform", |
| 242 | [PM_DISK_SHUTDOWN] = "shutdown", | 265 | [PM_DISK_SHUTDOWN] = "shutdown", |
| 243 | [PM_DISK_REBOOT] = "reboot", | 266 | [PM_DISK_REBOOT] = "reboot", |
| 267 | [PM_DISK_TEST] = "test", | ||
| 268 | [PM_DISK_TESTPROC] = "testproc", | ||
| 244 | }; | 269 | }; |
| 245 | 270 | ||
| 246 | /** | 271 | /** |
| @@ -295,17 +320,19 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n) | |||
| 295 | } | 320 | } |
| 296 | } | 321 | } |
| 297 | if (mode) { | 322 | if (mode) { |
| 298 | if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT) | 323 | if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT || |
| 324 | mode == PM_DISK_TEST || mode == PM_DISK_TESTPROC) { | ||
| 299 | pm_disk_mode = mode; | 325 | pm_disk_mode = mode; |
| 300 | else { | 326 | } else { |
| 301 | if (pm_ops && pm_ops->enter && | 327 | if (pm_ops && pm_ops->enter && |
| 302 | (mode == pm_ops->pm_disk_mode)) | 328 | (mode == pm_ops->pm_disk_mode)) |
| 303 | pm_disk_mode = mode; | 329 | pm_disk_mode = mode; |
| 304 | else | 330 | else |
| 305 | error = -EINVAL; | 331 | error = -EINVAL; |
| 306 | } | 332 | } |
| 307 | } else | 333 | } else { |
| 308 | error = -EINVAL; | 334 | error = -EINVAL; |
| 335 | } | ||
| 309 | 336 | ||
| 310 | pr_debug("PM: suspend-to-disk mode set to '%s'\n", | 337 | pr_debug("PM: suspend-to-disk mode set to '%s'\n", |
| 311 | pm_disk_modes[mode]); | 338 | pm_disk_modes[mode]); |
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 9b2ee5344dee..1a3b0dd2c3fc 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
| @@ -425,7 +425,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page, | |||
| 425 | bio_set_pages_dirty(bio); | 425 | bio_set_pages_dirty(bio); |
| 426 | bio_put(bio); | 426 | bio_put(bio); |
| 427 | } else { | 427 | } else { |
| 428 | get_page(page); | 428 | if (rw == READ) |
| 429 | get_page(page); /* These pages are freed later */ | ||
| 429 | bio->bi_private = *bio_chain; | 430 | bio->bi_private = *bio_chain; |
| 430 | *bio_chain = bio; | 431 | *bio_chain = bio; |
| 431 | submit_bio(rw | (1 << BIO_RW_SYNC), bio); | 432 | submit_bio(rw | (1 << BIO_RW_SYNC), bio); |
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 93b5dd283dea..d991d3b0e5a4 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/swapops.h> | 19 | #include <linux/swapops.h> |
| 20 | #include <linux/pm.h> | 20 | #include <linux/pm.h> |
| 21 | #include <linux/fs.h> | 21 | #include <linux/fs.h> |
| 22 | #include <linux/console.h> | ||
| 22 | #include <linux/cpu.h> | 23 | #include <linux/cpu.h> |
| 23 | 24 | ||
| 24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
| @@ -173,12 +174,14 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
| 173 | /* Free memory before shutting down devices. */ | 174 | /* Free memory before shutting down devices. */ |
| 174 | error = swsusp_shrink_memory(); | 175 | error = swsusp_shrink_memory(); |
| 175 | if (!error) { | 176 | if (!error) { |
| 177 | suspend_console(); | ||
| 176 | error = device_suspend(PMSG_FREEZE); | 178 | error = device_suspend(PMSG_FREEZE); |
| 177 | if (!error) { | 179 | if (!error) { |
| 178 | in_suspend = 1; | 180 | in_suspend = 1; |
| 179 | error = swsusp_suspend(); | 181 | error = swsusp_suspend(); |
| 180 | device_resume(); | 182 | device_resume(); |
| 181 | } | 183 | } |
| 184 | resume_console(); | ||
| 182 | } | 185 | } |
| 183 | up(&pm_sem); | 186 | up(&pm_sem); |
| 184 | if (!error) | 187 | if (!error) |
| @@ -196,11 +199,13 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
| 196 | snapshot_free_unused_memory(&data->handle); | 199 | snapshot_free_unused_memory(&data->handle); |
| 197 | down(&pm_sem); | 200 | down(&pm_sem); |
| 198 | pm_prepare_console(); | 201 | pm_prepare_console(); |
| 202 | suspend_console(); | ||
| 199 | error = device_suspend(PMSG_PRETHAW); | 203 | error = device_suspend(PMSG_PRETHAW); |
| 200 | if (!error) { | 204 | if (!error) { |
| 201 | error = swsusp_resume(); | 205 | error = swsusp_resume(); |
| 202 | device_resume(); | 206 | device_resume(); |
| 203 | } | 207 | } |
| 208 | resume_console(); | ||
| 204 | pm_restore_console(); | 209 | pm_restore_console(); |
| 205 | up(&pm_sem); | 210 | up(&pm_sem); |
| 206 | break; | 211 | break; |
| @@ -289,6 +294,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
| 289 | } | 294 | } |
| 290 | 295 | ||
| 291 | /* Put devices to sleep */ | 296 | /* Put devices to sleep */ |
| 297 | suspend_console(); | ||
| 292 | error = device_suspend(PMSG_SUSPEND); | 298 | error = device_suspend(PMSG_SUSPEND); |
| 293 | if (error) { | 299 | if (error) { |
| 294 | printk(KERN_ERR "Failed to suspend some devices.\n"); | 300 | printk(KERN_ERR "Failed to suspend some devices.\n"); |
| @@ -299,7 +305,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp, | |||
| 299 | /* Wake up devices */ | 305 | /* Wake up devices */ |
| 300 | device_resume(); | 306 | device_resume(); |
| 301 | } | 307 | } |
| 302 | 308 | resume_console(); | |
| 303 | if (pm_ops->finish) | 309 | if (pm_ops->finish) |
| 304 | pm_ops->finish(PM_SUSPEND_MEM); | 310 | pm_ops->finish(PM_SUSPEND_MEM); |
| 305 | 311 | ||
diff --git a/kernel/printk.c b/kernel/printk.c
index 771f5e861bcd..66426552fbfe 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
| @@ -31,6 +31,7 @@ | |||
| 31 | #include <linux/security.h> | 31 | #include <linux/security.h> |
| 32 | #include <linux/bootmem.h> | 32 | #include <linux/bootmem.h> |
| 33 | #include <linux/syscalls.h> | 33 | #include <linux/syscalls.h> |
| 34 | #include <linux/jiffies.h> | ||
| 34 | 35 | ||
| 35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
| 36 | 37 | ||
| @@ -820,15 +821,8 @@ void release_console_sem(void) | |||
| 820 | console_locked = 0; | 821 | console_locked = 0; |
| 821 | up(&console_sem); | 822 | up(&console_sem); |
| 822 | spin_unlock_irqrestore(&logbuf_lock, flags); | 823 | spin_unlock_irqrestore(&logbuf_lock, flags); |
| 823 | if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) { | 824 | if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) |
| 824 | /* | 825 | wake_up_interruptible(&log_wait); |
| 825 | * If we printk from within the lock dependency code, | ||
| 826 | * from within the scheduler code, then do not lock | ||
| 827 | * up due to self-recursion: | ||
| 828 | */ | ||
| 829 | if (!lockdep_internal()) | ||
| 830 | wake_up_interruptible(&log_wait); | ||
| 831 | } | ||
| 832 | } | 826 | } |
| 833 | EXPORT_SYMBOL(release_console_sem); | 827 | EXPORT_SYMBOL(release_console_sem); |
| 834 | 828 | ||
| @@ -1108,3 +1102,23 @@ int printk_ratelimit(void) | |||
| 1108 | printk_ratelimit_burst); | 1102 | printk_ratelimit_burst); |
| 1109 | } | 1103 | } |
| 1110 | EXPORT_SYMBOL(printk_ratelimit); | 1104 | EXPORT_SYMBOL(printk_ratelimit); |
| 1105 | |||
| 1106 | /** | ||
| 1107 | * printk_timed_ratelimit - caller-controlled printk ratelimiting | ||
| 1108 | * @caller_jiffies: pointer to caller's state | ||
| 1109 | * @interval_msecs: minimum interval between prints | ||
| 1110 | * | ||
| 1111 | * printk_timed_ratelimit() returns true if more than @interval_msecs | ||
| 1112 | * milliseconds have elapsed since the last time printk_timed_ratelimit() | ||
| 1113 | * returned true. | ||
| 1114 | */ | ||
| 1115 | bool printk_timed_ratelimit(unsigned long *caller_jiffies, | ||
| 1116 | unsigned int interval_msecs) | ||
| 1117 | { | ||
| 1118 | if (*caller_jiffies == 0 || time_after(jiffies, *caller_jiffies)) { | ||
| 1119 | *caller_jiffies = jiffies + msecs_to_jiffies(interval_msecs); | ||
| 1120 | return true; | ||
| 1121 | } | ||
| 1122 | return false; | ||
| 1123 | } | ||
| 1124 | EXPORT_SYMBOL(printk_timed_ratelimit); | ||
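For reference, a likely use of the new helper from driver code; the device name and five-second interval are illustrative, not taken from the patch. Each call site keeps its own state word, so the limit is per-message rather than global as with printk_ratelimit():

    #include <linux/kernel.h>

    static void report_fifo_overrun(void)
    {
        static unsigned long last;  /* 0 => the first call always prints */

        if (printk_timed_ratelimit(&last, 5 * 1000))
            printk(KERN_WARNING "exampledev: FIFO overrun\n");
    }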
diff --git a/kernel/profile.c b/kernel/profile.c index 857300a2afec..f940b462eec9 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
| @@ -399,7 +399,7 @@ static int prof_cpu_mask_write_proc (struct file *file, const char __user *buffe | |||
| 399 | unsigned long full_count = count, err; | 399 | unsigned long full_count = count, err; |
| 400 | cpumask_t new_value; | 400 | cpumask_t new_value; |
| 401 | 401 | ||
| 402 | err = cpumask_parse(buffer, count, new_value); | 402 | err = cpumask_parse_user(buffer, count, new_value); |
| 403 | if (err) | 403 | if (err) |
| 404 | return err; | 404 | return err; |
| 405 | 405 | ||
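The one-line change matters because `buffer` here is a userspace pointer: cpumask_parse() expects a kernel buffer, so parsing it directly was wrong. The _user variant does the copy from userspace internally. A sketch of the corrected handler shape, with the profile-specific body dropped:

    #include <linux/cpumask.h>

    static int mask_write_proc(struct file *file, const char __user *buffer,
                               unsigned long count, void *data)
    {
        cpumask_t new_value;
        int err = cpumask_parse_user(buffer, count, new_value);

        if (err)
            return err;
        /* ... publish new_value to the consumer ... */
        return count;
    }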
diff --git a/kernel/sched.c b/kernel/sched.c index 53608a59d6e3..3399701c680e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
| @@ -160,15 +160,6 @@ | |||
| 160 | #define TASK_PREEMPTS_CURR(p, rq) \ | 160 | #define TASK_PREEMPTS_CURR(p, rq) \ |
| 161 | ((p)->prio < (rq)->curr->prio) | 161 | ((p)->prio < (rq)->curr->prio) |
| 162 | 162 | ||
| 163 | /* | ||
| 164 | * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ] | ||
| 165 | * to time slice values: [800ms ... 100ms ... 5ms] | ||
| 166 | * | ||
| 167 | * The higher a thread's priority, the bigger timeslices | ||
| 168 | * it gets during one round of execution. But even the lowest | ||
| 169 | * priority thread gets MIN_TIMESLICE worth of execution time. | ||
| 170 | */ | ||
| 171 | |||
| 172 | #define SCALE_PRIO(x, prio) \ | 163 | #define SCALE_PRIO(x, prio) \ |
| 173 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) | 164 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE) |
| 174 | 165 | ||
| @@ -180,6 +171,15 @@ static unsigned int static_prio_timeslice(int static_prio) | |||
| 180 | return SCALE_PRIO(DEF_TIMESLICE, static_prio); | 171 | return SCALE_PRIO(DEF_TIMESLICE, static_prio); |
| 181 | } | 172 | } |
| 182 | 173 | ||
| 174 | /* | ||
| 175 | * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ] | ||
| 176 | * to time slice values: [800ms ... 100ms ... 5ms] | ||
| 177 | * | ||
| 178 | * The higher a thread's priority, the bigger timeslices | ||
| 179 | * it gets during one round of execution. But even the lowest | ||
| 180 | * priority thread gets MIN_TIMESLICE worth of execution time. | ||
| 181 | */ | ||
| 182 | |||
| 183 | static inline unsigned int task_timeslice(struct task_struct *p) | 183 | static inline unsigned int task_timeslice(struct task_struct *p) |
| 184 | { | 184 | { |
| 185 | return static_prio_timeslice(p->static_prio); | 185 | return static_prio_timeslice(p->static_prio); |
| @@ -1822,14 +1822,14 @@ context_switch(struct rq *rq, struct task_struct *prev, | |||
| 1822 | struct mm_struct *mm = next->mm; | 1822 | struct mm_struct *mm = next->mm; |
| 1823 | struct mm_struct *oldmm = prev->active_mm; | 1823 | struct mm_struct *oldmm = prev->active_mm; |
| 1824 | 1824 | ||
| 1825 | if (unlikely(!mm)) { | 1825 | if (!mm) { |
| 1826 | next->active_mm = oldmm; | 1826 | next->active_mm = oldmm; |
| 1827 | atomic_inc(&oldmm->mm_count); | 1827 | atomic_inc(&oldmm->mm_count); |
| 1828 | enter_lazy_tlb(oldmm, next); | 1828 | enter_lazy_tlb(oldmm, next); |
| 1829 | } else | 1829 | } else |
| 1830 | switch_mm(oldmm, mm, next); | 1830 | switch_mm(oldmm, mm, next); |
| 1831 | 1831 | ||
| 1832 | if (unlikely(!prev->mm)) { | 1832 | if (!prev->mm) { |
| 1833 | prev->active_mm = NULL; | 1833 | prev->active_mm = NULL; |
| 1834 | WARN_ON(rq->prev_mm); | 1834 | WARN_ON(rq->prev_mm); |
| 1835 | rq->prev_mm = oldmm; | 1835 | rq->prev_mm = oldmm; |
| @@ -3491,7 +3491,7 @@ asmlinkage void __sched preempt_schedule(void) | |||
| 3491 | * If there is a non-zero preempt_count or interrupts are disabled, | 3491 | * If there is a non-zero preempt_count or interrupts are disabled, |
| 3492 | * we do not want to preempt the current task. Just return.. | 3492 | * we do not want to preempt the current task. Just return.. |
| 3493 | */ | 3493 | */ |
| 3494 | if (unlikely(ti->preempt_count || irqs_disabled())) | 3494 | if (likely(ti->preempt_count || irqs_disabled())) |
| 3495 | return; | 3495 | return; |
| 3496 | 3496 | ||
| 3497 | need_resched: | 3497 | need_resched: |
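Two kinds of annotation fixes above: context_switch() drops the unlikely() hints on its mm tests (kernel threads switch in often enough that the bias was unjustified), while preempt_schedule() marks its early return likely(), treating the bail-out as the common case. The hints only steer branch layout, never logic; a toy example of the intended usage (names hypothetical):

    #include <linux/compiler.h>

    static int consume(const int *p)
    {
        if (unlikely(p == NULL))    /* genuinely rare error path */
            return -1;
        if (likely(*p > 0))         /* measured dominant case */
            return *p;
        return 0;
    }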
diff --git a/kernel/signal.c b/kernel/signal.c index 7ed8d5304bec..df18c167a2a7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
| @@ -267,18 +267,25 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags, | |||
| 267 | int override_rlimit) | 267 | int override_rlimit) |
| 268 | { | 268 | { |
| 269 | struct sigqueue *q = NULL; | 269 | struct sigqueue *q = NULL; |
| 270 | struct user_struct *user; | ||
| 270 | 271 | ||
| 271 | atomic_inc(&t->user->sigpending); | 272 | /* |
| 273 | * In order to avoid problems with "switch_user()", we want to make | ||
| 274 | * sure that the compiler doesn't re-load "t->user" | ||
| 275 | */ | ||
| 276 | user = t->user; | ||
| 277 | barrier(); | ||
| 278 | atomic_inc(&user->sigpending); | ||
| 272 | if (override_rlimit || | 279 | if (override_rlimit || |
| 273 | atomic_read(&t->user->sigpending) <= | 280 | atomic_read(&user->sigpending) <= |
| 274 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) | 281 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) |
| 275 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 282 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
| 276 | if (unlikely(q == NULL)) { | 283 | if (unlikely(q == NULL)) { |
| 277 | atomic_dec(&t->user->sigpending); | 284 | atomic_dec(&user->sigpending); |
| 278 | } else { | 285 | } else { |
| 279 | INIT_LIST_HEAD(&q->list); | 286 | INIT_LIST_HEAD(&q->list); |
| 280 | q->flags = 0; | 287 | q->flags = 0; |
| 281 | q->user = get_uid(t->user); | 288 | q->user = get_uid(user); |
| 282 | } | 289 | } |
| 283 | return(q); | 290 | return(q); |
| 284 | } | 291 | } |
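The point of caching the pointer and inserting barrier() is to stop the compiler from re-reading t->user between the atomic_inc() and the later uses: if switch_user() re-points t->user concurrently, the increment and decrement must still land on the same user_struct. The idiom in isolation (struct names hypothetical):

    #include <linux/compiler.h>
    #include <asm/atomic.h>

    struct resource_obj { atomic_t pending; };
    struct holder { struct resource_obj *res; };

    static void touch_resource(struct holder *h)
    {
        struct resource_obj *res = h->res;  /* read the pointer once */

        barrier();          /* compiler may not re-load h->res below */
        atomic_inc(&res->pending);
        /* ... work against the pinned object ... */
        atomic_dec(&res->pending);
    }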
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 7a3b2e75f040..d7306d0f3dfc 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
| @@ -49,6 +49,7 @@ cond_syscall(compat_sys_get_robust_list); | |||
| 49 | cond_syscall(sys_epoll_create); | 49 | cond_syscall(sys_epoll_create); |
| 50 | cond_syscall(sys_epoll_ctl); | 50 | cond_syscall(sys_epoll_ctl); |
| 51 | cond_syscall(sys_epoll_wait); | 51 | cond_syscall(sys_epoll_wait); |
| 52 | cond_syscall(sys_epoll_pwait); | ||
| 52 | cond_syscall(sys_semget); | 53 | cond_syscall(sys_semget); |
| 53 | cond_syscall(sys_semop); | 54 | cond_syscall(sys_semop); |
| 54 | cond_syscall(sys_semtimedop); | 55 | cond_syscall(sys_semtimedop); |
| @@ -134,6 +135,7 @@ cond_syscall(sys_madvise); | |||
| 134 | cond_syscall(sys_mremap); | 135 | cond_syscall(sys_mremap); |
| 135 | cond_syscall(sys_remap_file_pages); | 136 | cond_syscall(sys_remap_file_pages); |
| 136 | cond_syscall(compat_sys_move_pages); | 137 | cond_syscall(compat_sys_move_pages); |
| 138 | cond_syscall(compat_sys_migrate_pages); | ||
| 137 | 139 | ||
| 138 | /* block-layer dependent */ | 140 | /* block-layer dependent */ |
| 139 | cond_syscall(sys_bdflush); | 141 | cond_syscall(sys_bdflush); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 8020fb273c4f..09e569f4792b 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
| @@ -136,8 +136,10 @@ static int parse_table(int __user *, int, void __user *, size_t __user *, | |||
| 136 | static int proc_do_uts_string(ctl_table *table, int write, struct file *filp, | 136 | static int proc_do_uts_string(ctl_table *table, int write, struct file *filp, |
| 137 | void __user *buffer, size_t *lenp, loff_t *ppos); | 137 | void __user *buffer, size_t *lenp, loff_t *ppos); |
| 138 | 138 | ||
| 139 | #ifdef CONFIG_PROC_SYSCTL | ||
| 139 | static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, | 140 | static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, |
| 140 | void __user *buffer, size_t *lenp, loff_t *ppos); | 141 | void __user *buffer, size_t *lenp, loff_t *ppos); |
| 142 | #endif | ||
| 141 | 143 | ||
| 142 | static ctl_table root_table[]; | 144 | static ctl_table root_table[]; |
| 143 | static struct ctl_table_header root_table_header = | 145 | static struct ctl_table_header root_table_header = |
| @@ -542,6 +544,7 @@ static ctl_table kern_table[] = { | |||
| 542 | .proc_handler = &proc_dointvec, | 544 | .proc_handler = &proc_dointvec, |
| 543 | }, | 545 | }, |
| 544 | #endif | 546 | #endif |
| 547 | #ifdef CONFIG_PROC_SYSCTL | ||
| 545 | { | 548 | { |
| 546 | .ctl_name = KERN_CADPID, | 549 | .ctl_name = KERN_CADPID, |
| 547 | .procname = "cad_pid", | 550 | .procname = "cad_pid", |
| @@ -550,6 +553,7 @@ static ctl_table kern_table[] = { | |||
| 550 | .mode = 0600, | 553 | .mode = 0600, |
| 551 | .proc_handler = &proc_do_cad_pid, | 554 | .proc_handler = &proc_do_cad_pid, |
| 552 | }, | 555 | }, |
| 556 | #endif | ||
| 553 | { | 557 | { |
| 554 | .ctl_name = KERN_MAX_THREADS, | 558 | .ctl_name = KERN_MAX_THREADS, |
| 555 | .procname = "threads-max", | 559 | .procname = "threads-max", |
| @@ -1311,7 +1315,9 @@ repeat: | |||
| 1311 | return -ENOTDIR; | 1315 | return -ENOTDIR; |
| 1312 | if (get_user(n, name)) | 1316 | if (get_user(n, name)) |
| 1313 | return -EFAULT; | 1317 | return -EFAULT; |
| 1314 | for ( ; table->ctl_name; table++) { | 1318 | for ( ; table->ctl_name || table->procname; table++) { |
| 1319 | if (!table->ctl_name) | ||
| 1320 | continue; | ||
| 1315 | if (n == table->ctl_name || table->ctl_name == CTL_ANY) { | 1321 | if (n == table->ctl_name || table->ctl_name == CTL_ANY) { |
| 1316 | int error; | 1322 | int error; |
| 1317 | if (table->child) { | 1323 | if (table->child) { |
| @@ -1528,7 +1534,7 @@ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root, | |||
| 1528 | int len; | 1534 | int len; |
| 1529 | mode_t mode; | 1535 | mode_t mode; |
| 1530 | 1536 | ||
| 1531 | for (; table->ctl_name; table++) { | 1537 | for (; table->ctl_name || table->procname; table++) { |
| 1532 | /* Can't do anything without a proc name. */ | 1538 | /* Can't do anything without a proc name. */ |
| 1533 | if (!table->procname) | 1539 | if (!table->procname) |
| 1534 | continue; | 1540 | continue; |
| @@ -1575,7 +1581,7 @@ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root, | |||
| 1575 | static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root) | 1581 | static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root) |
| 1576 | { | 1582 | { |
| 1577 | struct proc_dir_entry *de; | 1583 | struct proc_dir_entry *de; |
| 1578 | for (; table->ctl_name; table++) { | 1584 | for (; table->ctl_name || table->procname; table++) { |
| 1579 | if (!(de = table->de)) | 1585 | if (!(de = table->de)) |
| 1580 | continue; | 1586 | continue; |
| 1581 | if (de->mode & S_IFDIR) { | 1587 | if (de->mode & S_IFDIR) { |
| @@ -2676,13 +2682,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, | |||
| 2676 | asmlinkage long sys_sysctl(struct __sysctl_args __user *args) | 2682 | asmlinkage long sys_sysctl(struct __sysctl_args __user *args) |
| 2677 | { | 2683 | { |
| 2678 | static int msg_count; | 2684 | static int msg_count; |
| 2685 | struct __sysctl_args tmp; | ||
| 2686 | int name[CTL_MAXNAME]; | ||
| 2687 | int i; | ||
| 2688 | |||
| 2689 | /* Read in the sysctl name for better debug message logging */ | ||
| 2690 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
| 2691 | return -EFAULT; | ||
| 2692 | if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME) | ||
| 2693 | return -ENOTDIR; | ||
| 2694 | for (i = 0; i < tmp.nlen; i++) | ||
| 2695 | if (get_user(name[i], tmp.name + i)) | ||
| 2696 | return -EFAULT; | ||
| 2697 | |||
| 2698 | /* Ignore accesses to kernel.version */ | ||
| 2699 | if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION)) | ||
| 2700 | goto out; | ||
| 2679 | 2701 | ||
| 2680 | if (msg_count < 5) { | 2702 | if (msg_count < 5) { |
| 2681 | msg_count++; | 2703 | msg_count++; |
| 2682 | printk(KERN_INFO | 2704 | printk(KERN_INFO |
| 2683 | "warning: process `%s' used the removed sysctl " | 2705 | "warning: process `%s' used the removed sysctl " |
| 2684 | "system call\n", current->comm); | 2706 | "system call with ", current->comm); |
| 2707 | for (i = 0; i < tmp.nlen; i++) | ||
| 2708 | printk("%d.", name[i]); | ||
| 2709 | printk("\n"); | ||
| 2685 | } | 2710 | } |
| 2711 | out: | ||
| 2686 | return -ENOSYS; | 2712 | return -ENOSYS; |
| 2687 | } | 2713 | } |
| 2688 | 2714 | ||
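The three loop changes in parse_table(), register_proc_table() and unregister_proc_table() redefine the table terminator: iteration now stops only when ctl_name and procname are both zero, so a table may legally contain /proc-only entries that have no binary sysctl number. A sketch of such a table (the "example" entry and its backing variable are illustrative):

    #include <linux/sysctl.h>

    static int example_value;

    static ctl_table example_table[] = {
        {
            .procname     = "example",  /* /proc/sys only: no ctl_name */
            .data         = &example_value,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = &proc_dointvec,
        },
        { .ctl_name = 0 }   /* terminator: ctl_name and procname both zero */
    };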
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index 5d6a8c54ee85..f45c5e70773c 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
| @@ -77,7 +77,8 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, | |||
| 77 | /* | 77 | /* |
| 78 | * If new attributes are added, please revisit this allocation | 78 | * If new attributes are added, please revisit this allocation |
| 79 | */ | 79 | */ |
| 80 | skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL); | 80 | size = nlmsg_total_size(genlmsg_total_size(size)); |
| 81 | skb = nlmsg_new(size, GFP_KERNEL); | ||
| 81 | if (!skb) | 82 | if (!skb) |
| 82 | return -ENOMEM; | 83 | return -ENOMEM; |
| 83 | 84 | ||
| @@ -174,21 +175,19 @@ static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu) | |||
| 174 | up_write(&listeners->sem); | 175 | up_write(&listeners->sem); |
| 175 | } | 176 | } |
| 176 | 177 | ||
| 177 | static int fill_pid(pid_t pid, struct task_struct *pidtsk, | 178 | static int fill_pid(pid_t pid, struct task_struct *tsk, |
| 178 | struct taskstats *stats) | 179 | struct taskstats *stats) |
| 179 | { | 180 | { |
| 180 | int rc = 0; | 181 | int rc = 0; |
| 181 | struct task_struct *tsk = pidtsk; | ||
| 182 | 182 | ||
| 183 | if (!pidtsk) { | 183 | if (!tsk) { |
| 184 | read_lock(&tasklist_lock); | 184 | rcu_read_lock(); |
| 185 | tsk = find_task_by_pid(pid); | 185 | tsk = find_task_by_pid(pid); |
| 186 | if (!tsk) { | 186 | if (tsk) |
| 187 | read_unlock(&tasklist_lock); | 187 | get_task_struct(tsk); |
| 188 | rcu_read_unlock(); | ||
| 189 | if (!tsk) | ||
| 188 | return -ESRCH; | 190 | return -ESRCH; |
| 189 | } | ||
| 190 | get_task_struct(tsk); | ||
| 191 | read_unlock(&tasklist_lock); | ||
| 192 | } else | 191 | } else |
| 193 | get_task_struct(tsk); | 192 | get_task_struct(tsk); |
| 194 | 193 | ||
| @@ -214,39 +213,30 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk, | |||
| 214 | 213 | ||
| 215 | } | 214 | } |
| 216 | 215 | ||
| 217 | static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk, | 216 | static int fill_tgid(pid_t tgid, struct task_struct *first, |
| 218 | struct taskstats *stats) | 217 | struct taskstats *stats) |
| 219 | { | 218 | { |
| 220 | struct task_struct *tsk, *first; | 219 | struct task_struct *tsk; |
| 221 | unsigned long flags; | 220 | unsigned long flags; |
| 221 | int rc = -ESRCH; | ||
| 222 | 222 | ||
| 223 | /* | 223 | /* |
| 224 | * Add additional stats from live tasks except zombie thread group | 224 | * Add additional stats from live tasks except zombie thread group |
| 225 | * leaders who are already counted with the dead tasks | 225 | * leaders who are already counted with the dead tasks |
| 226 | */ | 226 | */ |
| 227 | first = tgidtsk; | 227 | rcu_read_lock(); |
| 228 | if (!first) { | 228 | if (!first) |
| 229 | read_lock(&tasklist_lock); | ||
| 230 | first = find_task_by_pid(tgid); | 229 | first = find_task_by_pid(tgid); |
| 231 | if (!first) { | ||
| 232 | read_unlock(&tasklist_lock); | ||
| 233 | return -ESRCH; | ||
| 234 | } | ||
| 235 | get_task_struct(first); | ||
| 236 | read_unlock(&tasklist_lock); | ||
| 237 | } else | ||
| 238 | get_task_struct(first); | ||
| 239 | 230 | ||
| 240 | /* Start with stats from dead tasks */ | 231 | if (!first || !lock_task_sighand(first, &flags)) |
| 241 | spin_lock_irqsave(&first->signal->stats_lock, flags); | 232 | goto out; |
| 233 | |||
| 242 | if (first->signal->stats) | 234 | if (first->signal->stats) |
| 243 | memcpy(stats, first->signal->stats, sizeof(*stats)); | 235 | memcpy(stats, first->signal->stats, sizeof(*stats)); |
| 244 | spin_unlock_irqrestore(&first->signal->stats_lock, flags); | ||
| 245 | 236 | ||
| 246 | tsk = first; | 237 | tsk = first; |
| 247 | read_lock(&tasklist_lock); | ||
| 248 | do { | 238 | do { |
| 249 | if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk)) | 239 | if (tsk->exit_state) |
| 250 | continue; | 240 | continue; |
| 251 | /* | 241 | /* |
| 252 | * Accounting subsystem can call its functions here to | 242 | * Accounting subsystem can call its functions here to |
| @@ -257,15 +247,18 @@ static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk, | |||
| 257 | delayacct_add_tsk(stats, tsk); | 247 | delayacct_add_tsk(stats, tsk); |
| 258 | 248 | ||
| 259 | } while_each_thread(first, tsk); | 249 | } while_each_thread(first, tsk); |
| 260 | read_unlock(&tasklist_lock); | ||
| 261 | stats->version = TASKSTATS_VERSION; | ||
| 262 | 250 | ||
| 251 | unlock_task_sighand(first, &flags); | ||
| 252 | rc = 0; | ||
| 253 | out: | ||
| 254 | rcu_read_unlock(); | ||
| 255 | |||
| 256 | stats->version = TASKSTATS_VERSION; | ||
| 263 | /* | 257 | /* |
| 264 | * Accounting subsystems can also add calls here to modify | 258 | * Accounting subsystems can also add calls here to modify |
| 265 | * fields of taskstats. | 259 | * fields of taskstats. |
| 266 | */ | 260 | */ |
| 267 | 261 | return rc; | |
| 268 | return 0; | ||
| 269 | } | 262 | } |
| 270 | 263 | ||
| 271 | 264 | ||
| @@ -273,7 +266,7 @@ static void fill_tgid_exit(struct task_struct *tsk) | |||
| 273 | { | 266 | { |
| 274 | unsigned long flags; | 267 | unsigned long flags; |
| 275 | 268 | ||
| 276 | spin_lock_irqsave(&tsk->signal->stats_lock, flags); | 269 | spin_lock_irqsave(&tsk->sighand->siglock, flags); |
| 277 | if (!tsk->signal->stats) | 270 | if (!tsk->signal->stats) |
| 278 | goto ret; | 271 | goto ret; |
| 279 | 272 | ||
| @@ -285,7 +278,7 @@ static void fill_tgid_exit(struct task_struct *tsk) | |||
| 285 | */ | 278 | */ |
| 286 | delayacct_add_tsk(tsk->signal->stats, tsk); | 279 | delayacct_add_tsk(tsk->signal->stats, tsk); |
| 287 | ret: | 280 | ret: |
| 288 | spin_unlock_irqrestore(&tsk->signal->stats_lock, flags); | 281 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); |
| 289 | return; | 282 | return; |
| 290 | } | 283 | } |
| 291 | 284 | ||
| @@ -419,7 +412,7 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) | |||
| 419 | return send_reply(rep_skb, info->snd_pid); | 412 | return send_reply(rep_skb, info->snd_pid); |
| 420 | 413 | ||
| 421 | nla_put_failure: | 414 | nla_put_failure: |
| 422 | return genlmsg_cancel(rep_skb, reply); | 415 | rc = genlmsg_cancel(rep_skb, reply); |
| 423 | err: | 416 | err: |
| 424 | nlmsg_free(rep_skb); | 417 | nlmsg_free(rep_skb); |
| 425 | return rc; | 418 | return rc; |
| @@ -461,24 +454,26 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats, | |||
| 461 | size_t size; | 454 | size_t size; |
| 462 | int is_thread_group; | 455 | int is_thread_group; |
| 463 | struct nlattr *na; | 456 | struct nlattr *na; |
| 464 | unsigned long flags; | ||
| 465 | 457 | ||
| 466 | if (!family_registered || !tidstats) | 458 | if (!family_registered) |
| 467 | return; | 459 | return; |
| 468 | 460 | ||
| 469 | spin_lock_irqsave(&tsk->signal->stats_lock, flags); | ||
| 470 | is_thread_group = tsk->signal->stats ? 1 : 0; | ||
| 471 | spin_unlock_irqrestore(&tsk->signal->stats_lock, flags); | ||
| 472 | |||
| 473 | rc = 0; | ||
| 474 | /* | 461 | /* |
| 475 | * Size includes space for nested attributes | 462 | * Size includes space for nested attributes |
| 476 | */ | 463 | */ |
| 477 | size = nla_total_size(sizeof(u32)) + | 464 | size = nla_total_size(sizeof(u32)) + |
| 478 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); | 465 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); |
| 479 | 466 | ||
| 480 | if (is_thread_group) | 467 | is_thread_group = (tsk->signal->stats != NULL); |
| 481 | size = 2 * size; /* PID + STATS + TGID + STATS */ | 468 | if (is_thread_group) { |
| 469 | /* PID + STATS + TGID + STATS */ | ||
| 470 | size = 2 * size; | ||
| 471 | /* fill the tsk->signal->stats structure */ | ||
| 472 | fill_tgid_exit(tsk); | ||
| 473 | } | ||
| 474 | |||
| 475 | if (!tidstats) | ||
| 476 | return; | ||
| 482 | 477 | ||
| 483 | rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); | 478 | rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); |
| 484 | if (rc < 0) | 479 | if (rc < 0) |
| @@ -498,11 +493,8 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats, | |||
| 498 | goto send; | 493 | goto send; |
| 499 | 494 | ||
| 500 | /* | 495 | /* |
| 501 | * tsk has/had a thread group so fill the tsk->signal->stats structure | ||
| 502 | * Doesn't matter if tsk is the leader or the last group member leaving | 496 | * Doesn't matter if tsk is the leader or the last group member leaving |
| 503 | */ | 497 | */ |
| 504 | |||
| 505 | fill_tgid_exit(tsk); | ||
| 506 | if (!group_dead) | 498 | if (!group_dead) |
| 507 | goto send; | 499 | goto send; |
| 508 | 500 | ||
| @@ -519,7 +511,6 @@ send: | |||
| 519 | 511 | ||
| 520 | nla_put_failure: | 512 | nla_put_failure: |
| 521 | genlmsg_cancel(rep_skb, reply); | 513 | genlmsg_cancel(rep_skb, reply); |
| 522 | goto ret; | ||
| 523 | err_skb: | 514 | err_skb: |
| 524 | nlmsg_free(rep_skb); | 515 | nlmsg_free(rep_skb); |
| 525 | ret: | 516 | ret: |
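The conversion in fill_pid() and fill_tgid() replaces the global tasklist_lock reader with RCU: the lookup and the reference grab happen inside one rcu_read_lock() section, after which the task_struct stays pinned with no lock held. The resulting idiom, extracted (helper name hypothetical):

    #include <linux/sched.h>
    #include <linux/rcupdate.h>

    static struct task_struct *get_task_by_pid(pid_t pid)
    {
        struct task_struct *tsk;

        rcu_read_lock();
        tsk = find_task_by_pid(pid);
        if (tsk)
            get_task_struct(tsk);   /* pin before dropping RCU */
        rcu_read_unlock();
        return tsk;     /* NULL if gone; caller must put_task_struct() */
    }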
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 126bb30c4afe..a99b2a6e6a07 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
| @@ -57,7 +57,7 @@ static cycle_t jiffies_read(void) | |||
| 57 | 57 | ||
| 58 | struct clocksource clocksource_jiffies = { | 58 | struct clocksource clocksource_jiffies = { |
| 59 | .name = "jiffies", | 59 | .name = "jiffies", |
| 60 | .rating = 0, /* lowest rating*/ | 60 | .rating = 1, /* lowest valid rating*/ |
| 61 | .read = jiffies_read, | 61 | .read = jiffies_read, |
| 62 | .mask = 0xffffffff, /*32bits*/ | 62 | .mask = 0xffffffff, /*32bits*/ |
| 63 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ | 63 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ |
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 47195fa0ec4f..3afeaa3a73f9 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
| @@ -161,9 +161,9 @@ void second_overflow(void) | |||
| 161 | time_adjust += MAX_TICKADJ; | 161 | time_adjust += MAX_TICKADJ; |
| 162 | tick_length -= MAX_TICKADJ_SCALED; | 162 | tick_length -= MAX_TICKADJ_SCALED; |
| 163 | } else { | 163 | } else { |
| 164 | time_adjust = 0; | ||
| 165 | tick_length += (s64)(time_adjust * NSEC_PER_USEC / | 164 | tick_length += (s64)(time_adjust * NSEC_PER_USEC / |
| 166 | HZ) << TICK_LENGTH_SHIFT; | 165 | HZ) << TICK_LENGTH_SHIFT; |
| 166 | time_adjust = 0; | ||
| 167 | } | 167 | } |
| 168 | } | 168 | } |
| 169 | } | 169 | } |
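The ntp.c hunk is purely an ordering fix: time_adjust was zeroed before being folded into tick_length, so the final sub-MAX_TICKADJ remainder was silently dropped every time. The general shape of the fix, consume-then-clear, restated with hypothetical accumulator names:

    /* Read the accumulator into the total before resetting it;
     * zeroing first (as the old code did) drops the final partial
     * contribution entirely. */
    static void fold_remainder(long *remaining, long long *total, long scale)
    {
        *total += (long long)*remaining * scale;    /* consume first */
        *remaining = 0;                             /* then clear */
    }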
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index db443221ba5b..96f77013d3f0 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
| @@ -36,7 +36,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | |||
| 36 | 36 | ||
| 37 | /* calculate task elapsed time in timespec */ | 37 | /* calculate task elapsed time in timespec */ |
| 38 | do_posix_clock_monotonic_gettime(&uptime); | 38 | do_posix_clock_monotonic_gettime(&uptime); |
| 39 | ts = timespec_sub(uptime, current->group_leader->start_time); | 39 | ts = timespec_sub(uptime, tsk->start_time); |
| 40 | /* rebase elapsed time to usec */ | 40 | /* rebase elapsed time to usec */ |
| 41 | ac_etime = timespec_to_ns(&ts); | 41 | ac_etime = timespec_to_ns(&ts); |
| 42 | do_div(ac_etime, NSEC_PER_USEC); | 42 | do_div(ac_etime, NSEC_PER_USEC); |
| @@ -58,7 +58,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | |||
| 58 | stats->ac_uid = tsk->uid; | 58 | stats->ac_uid = tsk->uid; |
| 59 | stats->ac_gid = tsk->gid; | 59 | stats->ac_gid = tsk->gid; |
| 60 | stats->ac_pid = tsk->pid; | 60 | stats->ac_pid = tsk->pid; |
| 61 | stats->ac_ppid = (tsk->parent) ? tsk->parent->pid : 0; | 61 | rcu_read_lock(); |
| 62 | stats->ac_ppid = pid_alive(tsk) ? | ||
| 63 | rcu_dereference(tsk->real_parent)->tgid : 0; | ||
| 64 | rcu_read_unlock(); | ||
| 62 | stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; | 65 | stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; |
| 63 | stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; | 66 | stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; |
| 64 | stats->ac_minflt = tsk->min_flt; | 67 | stats->ac_minflt = tsk->min_flt; |
| @@ -77,13 +80,17 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk) | |||
| 77 | */ | 80 | */ |
| 78 | void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) | 81 | void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) |
| 79 | { | 82 | { |
| 83 | struct mm_struct *mm; | ||
| 84 | |||
| 80 | /* convert pages-jiffies to Mbyte-usec */ | 85 | /* convert pages-jiffies to Mbyte-usec */ |
| 81 | stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; | 86 | stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; |
| 82 | stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; | 87 | stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; |
| 83 | if (p->mm) { | 88 | mm = get_task_mm(p); |
| 89 | if (mm) { | ||
| 84 | /* adjust to KB unit */ | 90 | /* adjust to KB unit */ |
| 85 | stats->hiwater_rss = p->mm->hiwater_rss * PAGE_SIZE / KB; | 91 | stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB; |
| 86 | stats->hiwater_vm = p->mm->hiwater_vm * PAGE_SIZE / KB; | 92 | stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; |
| 93 | mmput(mm); | ||
| 87 | } | 94 | } |
| 88 | stats->read_char = p->rchar; | 95 | stats->read_char = p->rchar; |
| 89 | stats->write_char = p->wchar; | 96 | stats->write_char = p->wchar; |
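Reading p->mm fields directly is only safe for current; for another task the mm can be torn down underneath the reader. get_task_mm() takes a reference (and returns NULL for kernel threads or an exiting task), mmput() releases it, which is the pattern the hunk adopts. In isolation (helper name hypothetical):

    #include <linux/sched.h>
    #include <linux/mm.h>

    static unsigned long task_hiwater_rss_kb(struct task_struct *p)
    {
        struct mm_struct *mm = get_task_mm(p);
        unsigned long kb = 0;

        if (mm) {
            kb = mm->hiwater_rss * (PAGE_SIZE / 1024);
            mmput(mm);
        }
        return kb;
    }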
diff --git a/kernel/unwind.c b/kernel/unwind.c index 2e2368607aab..f7e50d16dbf6 100644 --- a/kernel/unwind.c +++ b/kernel/unwind.c | |||
| @@ -11,13 +11,15 @@ | |||
| 11 | 11 | ||
| 12 | #include <linux/unwind.h> | 12 | #include <linux/unwind.h> |
| 13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
| 14 | #include <linux/delay.h> | 14 | #include <linux/bootmem.h> |
| 15 | #include <linux/sort.h> | ||
| 15 | #include <linux/stop_machine.h> | 16 | #include <linux/stop_machine.h> |
| 16 | #include <asm/sections.h> | 17 | #include <asm/sections.h> |
| 17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
| 18 | #include <asm/unaligned.h> | 19 | #include <asm/unaligned.h> |
| 19 | 20 | ||
| 20 | extern char __start_unwind[], __end_unwind[]; | 21 | extern char __start_unwind[], __end_unwind[]; |
| 22 | extern const u8 __start_unwind_hdr[], __end_unwind_hdr[]; | ||
| 21 | 23 | ||
| 22 | #define MAX_STACK_DEPTH 8 | 24 | #define MAX_STACK_DEPTH 8 |
| 23 | 25 | ||
| @@ -100,6 +102,8 @@ static struct unwind_table { | |||
| 100 | } core, init; | 102 | } core, init; |
| 101 | const void *address; | 103 | const void *address; |
| 102 | unsigned long size; | 104 | unsigned long size; |
| 105 | const unsigned char *header; | ||
| 106 | unsigned long hdrsz; | ||
| 103 | struct unwind_table *link; | 107 | struct unwind_table *link; |
| 104 | const char *name; | 108 | const char *name; |
| 105 | } root_table; | 109 | } root_table; |
| @@ -145,6 +149,10 @@ static struct unwind_table *find_table(unsigned long pc) | |||
| 145 | return table; | 149 | return table; |
| 146 | } | 150 | } |
| 147 | 151 | ||
| 152 | static unsigned long read_pointer(const u8 **pLoc, | ||
| 153 | const void *end, | ||
| 154 | signed ptrType); | ||
| 155 | |||
| 148 | static void init_unwind_table(struct unwind_table *table, | 156 | static void init_unwind_table(struct unwind_table *table, |
| 149 | const char *name, | 157 | const char *name, |
| 150 | const void *core_start, | 158 | const void *core_start, |
| @@ -152,14 +160,30 @@ static void init_unwind_table(struct unwind_table *table, | |||
| 152 | const void *init_start, | 160 | const void *init_start, |
| 153 | unsigned long init_size, | 161 | unsigned long init_size, |
| 154 | const void *table_start, | 162 | const void *table_start, |
| 155 | unsigned long table_size) | 163 | unsigned long table_size, |
| 164 | const u8 *header_start, | ||
| 165 | unsigned long header_size) | ||
| 156 | { | 166 | { |
| 167 | const u8 *ptr = header_start + 4; | ||
| 168 | const u8 *end = header_start + header_size; | ||
| 169 | |||
| 157 | table->core.pc = (unsigned long)core_start; | 170 | table->core.pc = (unsigned long)core_start; |
| 158 | table->core.range = core_size; | 171 | table->core.range = core_size; |
| 159 | table->init.pc = (unsigned long)init_start; | 172 | table->init.pc = (unsigned long)init_start; |
| 160 | table->init.range = init_size; | 173 | table->init.range = init_size; |
| 161 | table->address = table_start; | 174 | table->address = table_start; |
| 162 | table->size = table_size; | 175 | table->size = table_size; |
| 176 | /* See if the linker-provided table looks valid. */ | ||
| 177 | if (header_size <= 4 | ||
| 178 | || header_start[0] != 1 | ||
| 179 | || (void *)read_pointer(&ptr, end, header_start[1]) != table_start | ||
| 180 | || header_start[2] == DW_EH_PE_omit | ||
| 181 | || read_pointer(&ptr, end, header_start[2]) <= 0 | ||
| 182 | || header_start[3] == DW_EH_PE_omit) | ||
| 183 | header_start = NULL; | ||
| 184 | table->hdrsz = header_size; | ||
| 185 | smp_wmb(); | ||
| 186 | table->header = header_start; | ||
| 163 | table->link = NULL; | 187 | table->link = NULL; |
| 164 | table->name = name; | 188 | table->name = name; |
| 165 | } | 189 | } |
| @@ -169,7 +193,143 @@ void __init unwind_init(void) | |||
| 169 | init_unwind_table(&root_table, "kernel", | 193 | init_unwind_table(&root_table, "kernel", |
| 170 | _text, _end - _text, | 194 | _text, _end - _text, |
| 171 | NULL, 0, | 195 | NULL, 0, |
| 172 | __start_unwind, __end_unwind - __start_unwind); | 196 | __start_unwind, __end_unwind - __start_unwind, |
| 197 | __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr); | ||
| 198 | } | ||
| 199 | |||
| 200 | static const u32 bad_cie, not_fde; | ||
| 201 | static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *); | ||
| 202 | static signed fde_pointer_type(const u32 *cie); | ||
| 203 | |||
| 204 | struct eh_frame_hdr_table_entry { | ||
| 205 | unsigned long start, fde; | ||
| 206 | }; | ||
| 207 | |||
| 208 | static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2) | ||
| 209 | { | ||
| 210 | const struct eh_frame_hdr_table_entry *e1 = p1; | ||
| 211 | const struct eh_frame_hdr_table_entry *e2 = p2; | ||
| 212 | |||
| 213 | return (e1->start > e2->start) - (e1->start < e2->start); | ||
| 214 | } | ||
| 215 | |||
| 216 | static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size) | ||
| 217 | { | ||
| 218 | struct eh_frame_hdr_table_entry *e1 = p1; | ||
| 219 | struct eh_frame_hdr_table_entry *e2 = p2; | ||
| 220 | unsigned long v; | ||
| 221 | |||
| 222 | v = e1->start; | ||
| 223 | e1->start = e2->start; | ||
| 224 | e2->start = v; | ||
| 225 | v = e1->fde; | ||
| 226 | e1->fde = e2->fde; | ||
| 227 | e2->fde = v; | ||
| 228 | } | ||
| 229 | |||
| 230 | static void __init setup_unwind_table(struct unwind_table *table, | ||
| 231 | void *(*alloc)(unsigned long)) | ||
| 232 | { | ||
| 233 | const u8 *ptr; | ||
| 234 | unsigned long tableSize = table->size, hdrSize; | ||
| 235 | unsigned n; | ||
| 236 | const u32 *fde; | ||
| 237 | struct { | ||
| 238 | u8 version; | ||
| 239 | u8 eh_frame_ptr_enc; | ||
| 240 | u8 fde_count_enc; | ||
| 241 | u8 table_enc; | ||
| 242 | unsigned long eh_frame_ptr; | ||
| 243 | unsigned int fde_count; | ||
| 244 | struct eh_frame_hdr_table_entry table[]; | ||
| 245 | } __attribute__((__packed__)) *header; | ||
| 246 | |||
| 247 | if (table->header) | ||
| 248 | return; | ||
| 249 | |||
| 250 | if (table->hdrsz) | ||
| 251 | printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n", | ||
| 252 | table->name); | ||
| 253 | |||
| 254 | if (tableSize & (sizeof(*fde) - 1)) | ||
| 255 | return; | ||
| 256 | |||
| 257 | for (fde = table->address, n = 0; | ||
| 258 | tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; | ||
| 259 | tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { | ||
| 260 | const u32 *cie = cie_for_fde(fde, table); | ||
| 261 | signed ptrType; | ||
| 262 | |||
| 263 | if (cie == ¬_fde) | ||
| 264 | continue; | ||
| 265 | if (cie == NULL | ||
| 266 | || cie == &bad_cie | ||
| 267 | || (ptrType = fde_pointer_type(cie)) < 0) | ||
| 268 | return; | ||
| 269 | ptr = (const u8 *)(fde + 2); | ||
| 270 | if (!read_pointer(&ptr, | ||
| 271 | (const u8 *)(fde + 1) + *fde, | ||
| 272 | ptrType)) | ||
| 273 | return; | ||
| 274 | ++n; | ||
| 275 | } | ||
| 276 | |||
| 277 | if (tableSize || !n) | ||
| 278 | return; | ||
| 279 | |||
| 280 | hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) | ||
| 281 | + 2 * n * sizeof(unsigned long); | ||
| 282 | header = alloc(hdrSize); | ||
| 283 | if (!header) | ||
| 284 | return; | ||
| 285 | header->version = 1; | ||
| 286 | header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native; | ||
| 287 | header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4; | ||
| 288 | header->table_enc = DW_EH_PE_abs|DW_EH_PE_native; | ||
| 289 | put_unaligned((unsigned long)table->address, &header->eh_frame_ptr); | ||
| 290 | BUILD_BUG_ON(offsetof(typeof(*header), fde_count) | ||
| 291 | % __alignof(typeof(header->fde_count))); | ||
| 292 | header->fde_count = n; | ||
| 293 | |||
| 294 | BUILD_BUG_ON(offsetof(typeof(*header), table) | ||
| 295 | % __alignof(typeof(*header->table))); | ||
| 296 | for (fde = table->address, tableSize = table->size, n = 0; | ||
| 297 | tableSize; | ||
| 298 | tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { | ||
| 299 | const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); | ||
| 300 | |||
| 301 | if (!fde[1]) | ||
| 302 | continue; /* this is a CIE */ | ||
| 303 | ptr = (const u8 *)(fde + 2); | ||
| 304 | header->table[n].start = read_pointer(&ptr, | ||
| 305 | (const u8 *)(fde + 1) + *fde, | ||
| 306 | fde_pointer_type(cie)); | ||
| 307 | header->table[n].fde = (unsigned long)fde; | ||
| 308 | ++n; | ||
| 309 | } | ||
| 310 | WARN_ON(n != header->fde_count); | ||
| 311 | |||
| 312 | sort(header->table, | ||
| 313 | n, | ||
| 314 | sizeof(*header->table), | ||
| 315 | cmp_eh_frame_hdr_table_entries, | ||
| 316 | swap_eh_frame_hdr_table_entries); | ||
| 317 | |||
| 318 | table->hdrsz = hdrSize; | ||
| 319 | smp_wmb(); | ||
| 320 | table->header = (const void *)header; | ||
| 321 | } | ||
| 322 | |||
| 323 | static void *__init balloc(unsigned long sz) | ||
| 324 | { | ||
| 325 | return __alloc_bootmem_nopanic(sz, | ||
| 326 | sizeof(unsigned int), | ||
| 327 | __pa(MAX_DMA_ADDRESS)); | ||
| 328 | } | ||
| 329 | |||
| 330 | void __init unwind_setup(void) | ||
| 331 | { | ||
| 332 | setup_unwind_table(&root_table, balloc); | ||
| 173 | } | 333 | } |
| 174 | 334 | ||
| 175 | #ifdef CONFIG_MODULES | 335 | #ifdef CONFIG_MODULES |
| @@ -193,7 +353,8 @@ void *unwind_add_table(struct module *module, | |||
| 193 | init_unwind_table(table, module->name, | 353 | init_unwind_table(table, module->name, |
| 194 | module->module_core, module->core_size, | 354 | module->module_core, module->core_size, |
| 195 | module->module_init, module->init_size, | 355 | module->module_init, module->init_size, |
| 196 | table_start, table_size); | 356 | table_start, table_size, |
| 357 | NULL, 0); | ||
| 197 | 358 | ||
| 198 | if (last_table) | 359 | if (last_table) |
| 199 | last_table->link = table; | 360 | last_table->link = table; |
| @@ -303,6 +464,26 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end) | |||
| 303 | return value; | 464 | return value; |
| 304 | } | 465 | } |
| 305 | 466 | ||
| 467 | static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table) | ||
| 468 | { | ||
| 469 | const u32 *cie; | ||
| 470 | |||
| 471 | if (!*fde || (*fde & (sizeof(*fde) - 1))) | ||
| 472 | return &bad_cie; | ||
| 473 | if (!fde[1]) | ||
| 474 | return ¬_fde; /* this is a CIE */ | ||
| 475 | if ((fde[1] & (sizeof(*fde) - 1)) | ||
| 476 | || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) | ||
| 477 | return NULL; /* this is not a valid FDE */ | ||
| 478 | cie = fde + 1 - fde[1] / sizeof(*fde); | ||
| 479 | if (*cie <= sizeof(*cie) + 4 | ||
| 480 | || *cie >= fde[1] - sizeof(*fde) | ||
| 481 | || (*cie & (sizeof(*cie) - 1)) | ||
| 482 | || cie[1]) | ||
| 483 | return NULL; /* this is not a (valid) CIE */ | ||
| 484 | return cie; | ||
| 485 | } | ||
| 486 | |||
| 306 | static unsigned long read_pointer(const u8 **pLoc, | 487 | static unsigned long read_pointer(const u8 **pLoc, |
| 307 | const void *end, | 488 | const void *end, |
| 308 | signed ptrType) | 489 | signed ptrType) |
| @@ -610,49 +791,108 @@ int unwind(struct unwind_frame_info *frame) | |||
| 610 | unsigned i; | 791 | unsigned i; |
| 611 | signed ptrType = -1; | 792 | signed ptrType = -1; |
| 612 | uleb128_t retAddrReg = 0; | 793 | uleb128_t retAddrReg = 0; |
| 613 | struct unwind_table *table; | 794 | const struct unwind_table *table; |
| 614 | struct unwind_state state; | 795 | struct unwind_state state; |
| 615 | 796 | ||
| 616 | if (UNW_PC(frame) == 0) | 797 | if (UNW_PC(frame) == 0) |
| 617 | return -EINVAL; | 798 | return -EINVAL; |
| 618 | if ((table = find_table(pc)) != NULL | 799 | if ((table = find_table(pc)) != NULL |
| 619 | && !(table->size & (sizeof(*fde) - 1))) { | 800 | && !(table->size & (sizeof(*fde) - 1))) { |
| 620 | unsigned long tableSize = table->size; | 801 | const u8 *hdr = table->header; |
| 621 | 802 | unsigned long tableSize; | |
| 622 | for (fde = table->address; | 803 | |
| 623 | tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; | 804 | smp_rmb(); |
| 624 | tableSize -= sizeof(*fde) + *fde, | 805 | if (hdr && hdr[0] == 1) { |
| 625 | fde += 1 + *fde / sizeof(*fde)) { | 806 | switch(hdr[3] & DW_EH_PE_FORM) { |
| 626 | if (!*fde || (*fde & (sizeof(*fde) - 1))) | 807 | case DW_EH_PE_native: tableSize = sizeof(unsigned long); break; |
| 627 | break; | 808 | case DW_EH_PE_data2: tableSize = 2; break; |
| 628 | if (!fde[1]) | 809 | case DW_EH_PE_data4: tableSize = 4; break; |
| 629 | continue; /* this is a CIE */ | 810 | case DW_EH_PE_data8: tableSize = 8; break; |
| 630 | if ((fde[1] & (sizeof(*fde) - 1)) | 811 | default: tableSize = 0; break; |
| 631 | || fde[1] > (unsigned long)(fde + 1) | 812 | } |
| 632 | - (unsigned long)table->address) | 813 | ptr = hdr + 4; |
| 633 | continue; /* this is not a valid FDE */ | 814 | end = hdr + table->hdrsz; |
| 634 | cie = fde + 1 - fde[1] / sizeof(*fde); | 815 | if (tableSize |
| 635 | if (*cie <= sizeof(*cie) + 4 | 816 | && read_pointer(&ptr, end, hdr[1]) |
| 636 | || *cie >= fde[1] - sizeof(*fde) | 817 | == (unsigned long)table->address |
| 637 | || (*cie & (sizeof(*cie) - 1)) | 818 | && (i = read_pointer(&ptr, end, hdr[2])) > 0 |
| 638 | || cie[1] | 819 | && i == (end - ptr) / (2 * tableSize) |
| 639 | || (ptrType = fde_pointer_type(cie)) < 0) { | 820 | && !((end - ptr) % (2 * tableSize))) { |
| 640 | cie = NULL; /* this is not a (valid) CIE */ | 821 | do { |
| 641 | continue; | 822 | const u8 *cur = ptr + (i / 2) * (2 * tableSize); |
| 823 | |||
| 824 | startLoc = read_pointer(&cur, | ||
| 825 | cur + tableSize, | ||
| 826 | hdr[3]); | ||
| 827 | if (pc < startLoc) | ||
| 828 | i /= 2; | ||
| 829 | else { | ||
| 830 | ptr = cur - tableSize; | ||
| 831 | i = (i + 1) / 2; | ||
| 832 | } | ||
| 833 | } while (startLoc && i > 1); | ||
| 834 | if (i == 1 | ||
| 835 | && (startLoc = read_pointer(&ptr, | ||
| 836 | ptr + tableSize, | ||
| 837 | hdr[3])) != 0 | ||
| 838 | && pc >= startLoc) | ||
| 839 | fde = (void *)read_pointer(&ptr, | ||
| 840 | ptr + tableSize, | ||
| 841 | hdr[3]); | ||
| 642 | } | 842 | } |
| 843 | } | ||
| 844 | |||
| 845 | if (fde != NULL) { | ||
| 846 | cie = cie_for_fde(fde, table); | ||
| 643 | ptr = (const u8 *)(fde + 2); | 847 | ptr = (const u8 *)(fde + 2); |
| 644 | startLoc = read_pointer(&ptr, | 848 | if(cie != NULL |
| 645 | (const u8 *)(fde + 1) + *fde, | 849 | && cie != &bad_cie |
| 646 | ptrType); | 850 | && cie != ¬_fde |
| 647 | endLoc = startLoc | 851 | && (ptrType = fde_pointer_type(cie)) >= 0 |
| 648 | + read_pointer(&ptr, | 852 | && read_pointer(&ptr, |
| 649 | (const u8 *)(fde + 1) + *fde, | 853 | (const u8 *)(fde + 1) + *fde, |
| 650 | ptrType & DW_EH_PE_indirect | 854 | ptrType) == startLoc) { |
| 651 | ? ptrType | 855 | if (!(ptrType & DW_EH_PE_indirect)) |
| 652 | : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); | 856 | ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; |
| 653 | if (pc >= startLoc && pc < endLoc) | 857 | endLoc = startLoc |
| 654 | break; | 858 | + read_pointer(&ptr, |
| 655 | cie = NULL; | 859 | (const u8 *)(fde + 1) + *fde, |
| 860 | ptrType); | ||
| 861 | if(pc >= endLoc) | ||
| 862 | fde = NULL; | ||
| 863 | } else | ||
| 864 | fde = NULL; | ||
| 865 | } | ||
| 866 | if (fde == NULL) { | ||
| 867 | for (fde = table->address, tableSize = table->size; | ||
| 868 | cie = NULL, tableSize > sizeof(*fde) | ||
| 869 | && tableSize - sizeof(*fde) >= *fde; | ||
| 870 | tableSize -= sizeof(*fde) + *fde, | ||
| 871 | fde += 1 + *fde / sizeof(*fde)) { | ||
| 872 | cie = cie_for_fde(fde, table); | ||
| 873 | if (cie == &bad_cie) { | ||
| 874 | cie = NULL; | ||
| 875 | break; | ||
| 876 | } | ||
| 877 | if (cie == NULL | ||
| 878 | || cie == ¬_fde | ||
| 879 | || (ptrType = fde_pointer_type(cie)) < 0) | ||
| 880 | continue; | ||
| 881 | ptr = (const u8 *)(fde + 2); | ||
| 882 | startLoc = read_pointer(&ptr, | ||
| 883 | (const u8 *)(fde + 1) + *fde, | ||
| 884 | ptrType); | ||
| 885 | if (!startLoc) | ||
| 886 | continue; | ||
| 887 | if (!(ptrType & DW_EH_PE_indirect)) | ||
| 888 | ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; | ||
| 889 | endLoc = startLoc | ||
| 890 | + read_pointer(&ptr, | ||
| 891 | (const u8 *)(fde + 1) + *fde, | ||
| 892 | ptrType); | ||
| 893 | if (pc >= startLoc && pc < endLoc) | ||
| 894 | break; | ||
| 895 | } | ||
| 656 | } | 896 | } |
| 657 | } | 897 | } |
| 658 | if (cie != NULL) { | 898 | if (cie != NULL) { |
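The bulk of the unwind.c change turns FDE lookup from a linear scan of .eh_frame into a binary search: setup_unwind_table() validates and counts the FDEs, emits a version-1 .eh_frame_hdr-style table of (start, fde) pairs, sorts it with sort(), and unwind() then halves its way through that table, falling back to the old scan only when no usable header exists. The halving loop, restated over a plain sorted array (types and names hypothetical):

    struct range_entry { unsigned long start, fde; };

    /* Return the fde of the last entry with start <= pc, or 0. */
    static unsigned long find_fde(const struct range_entry *tbl,
                                  unsigned int n, unsigned long pc)
    {
        while (n > 1) {
            const struct range_entry *mid = tbl + n / 2;

            if (pc < mid->start)
                n /= 2;             /* keep the lower half */
            else {
                tbl = mid;          /* keep mid and above */
                n = (n + 1) / 2;
            }
        }
        return (n == 1 && pc >= tbl->start) ? tbl->fde : 0;
    }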
diff --git a/kernel/user.c b/kernel/user.c index 6408c0424291..220e586127a0 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
| @@ -187,6 +187,17 @@ void switch_uid(struct user_struct *new_user) | |||
| 187 | atomic_dec(&old_user->processes); | 187 | atomic_dec(&old_user->processes); |
| 188 | switch_uid_keyring(new_user); | 188 | switch_uid_keyring(new_user); |
| 189 | current->user = new_user; | 189 | current->user = new_user; |
| 190 | |||
| 191 | /* | ||
| 192 | * We need to synchronize with __sigqueue_alloc() | ||
| 193 | * doing a get_uid(p->user).. If that saw the old | ||
| 194 | * user value, we need to wait until it has exited | ||
| 195 | * its critical region before we can free the old | ||
| 196 | * structure. | ||
| 197 | */ | ||
| 198 | smp_mb(); | ||
| 199 | spin_unlock_wait(¤t->sighand->siglock); | ||
| 200 | |||
| 190 | free_uid(old_user); | 201 | free_uid(old_user); |
| 191 | suid_keys(current); | 202 | suid_keys(current); |
| 192 | } | 203 | } |
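This is the other half of the kernel/signal.c fix above: after current->user is re-pointed, the full barrier orders that store before the spin_unlock_wait(), which in turn waits out any __sigqueue_alloc() critical section (it runs under ->siglock) that may still be using the old user_struct. The publish-then-drain pattern in the abstract (types and free path hypothetical):

    #include <linux/spinlock.h>
    #include <linux/slab.h>
    #include <asm/system.h>

    struct shared_obj { int payload; };

    static void replace_and_free(struct shared_obj **slot,
                                 struct shared_obj *new,
                                 spinlock_t *reader_lock)
    {
        struct shared_obj *old = *slot;

        *slot = new;                    /* publish the new pointer */
        smp_mb();                       /* order the store before the poll */
        spin_unlock_wait(reader_lock);  /* drain the in-flight reader */
        kfree(old);                     /* later readers see only new */
    }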
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index cfc737bffe6d..17c2f03d2c27 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/notifier.h> | 28 | #include <linux/notifier.h> |
| 29 | #include <linux/kthread.h> | 29 | #include <linux/kthread.h> |
| 30 | #include <linux/hardirq.h> | 30 | #include <linux/hardirq.h> |
| 31 | #include <linux/mempolicy.h> | ||
| 31 | 32 | ||
| 32 | /* | 33 | /* |
| 33 | * The per-CPU workqueue (if single thread, we always use the first | 34 | * The per-CPU workqueue (if single thread, we always use the first |
| @@ -98,7 +99,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, | |||
| 98 | * @wq: workqueue to use | 99 | * @wq: workqueue to use |
| 99 | * @work: work to queue | 100 | * @work: work to queue |
| 100 | * | 101 | * |
| 101 | * Returns non-zero if it was successfully added. | 102 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
| 102 | * | 103 | * |
| 103 | * We queue the work to the CPU it was submitted, but there is no | 104 | * We queue the work to the CPU it was submitted, but there is no |
| 104 | * guarantee that it will be processed by that CPU. | 105 | * guarantee that it will be processed by that CPU. |
| @@ -137,7 +138,7 @@ static void delayed_work_timer_fn(unsigned long __data) | |||
| 137 | * @work: work to queue | 138 | * @work: work to queue |
| 138 | * @delay: number of jiffies to wait before queueing | 139 | * @delay: number of jiffies to wait before queueing |
| 139 | * | 140 | * |
| 140 | * Returns non-zero if it was successfully added. | 141 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
| 141 | */ | 142 | */ |
| 142 | int fastcall queue_delayed_work(struct workqueue_struct *wq, | 143 | int fastcall queue_delayed_work(struct workqueue_struct *wq, |
| 143 | struct work_struct *work, unsigned long delay) | 144 | struct work_struct *work, unsigned long delay) |
| @@ -168,7 +169,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work); | |||
| 168 | * @work: work to queue | 169 | * @work: work to queue |
| 169 | * @delay: number of jiffies to wait before queueing | 170 | * @delay: number of jiffies to wait before queueing |
| 170 | * | 171 | * |
| 171 | * Returns non-zero if it was successfully added. | 172 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
| 172 | */ | 173 | */ |
| 173 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 174 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
| 174 | struct work_struct *work, unsigned long delay) | 175 | struct work_struct *work, unsigned long delay) |
| @@ -245,6 +246,12 @@ static int worker_thread(void *__cwq) | |||
| 245 | sigprocmask(SIG_BLOCK, &blocked, NULL); | 246 | sigprocmask(SIG_BLOCK, &blocked, NULL); |
| 246 | flush_signals(current); | 247 | flush_signals(current); |
| 247 | 248 | ||
| 249 | /* | ||
| 250 | * We inherited MPOL_INTERLEAVE from the booting kernel. | ||
| 251 | * Set MPOL_DEFAULT to ensure node-local allocations. | ||
| 252 | */ | ||
| 253 | numa_default_policy(); | ||
| 254 | |||
| 248 | /* SIG_IGN makes children autoreap: see do_notify_parent(). */ | 255 | /* SIG_IGN makes children autoreap: see do_notify_parent(). */ |
| 249 | sa.sa.sa_handler = SIG_IGN; | 256 | sa.sa.sa_handler = SIG_IGN; |
| 250 | sa.sa.sa_flags = 0; | 257 | sa.sa.sa_flags = 0; |

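The three kernel-doc fixes make the return convention explicit: 0 means the work was already pending, non-zero means this call queued it. A caller that cares about the distinction might look like this (workqueue and work names illustrative):

    #include <linux/workqueue.h>

    static struct workqueue_struct *stats_wq;
    static struct work_struct stats_work;

    static void kick_stats(void)
    {
        if (!queue_work(stats_wq, &stats_work))
            pr_debug("stats work already pending, not requeued\n");
    }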