Diffstat (limited to 'kernel')
 kernel/compat.c      |  35
 kernel/cpu.c         |  14
 kernel/delayacct.c   |  15
 kernel/exit.c        |   1
 kernel/fork.c        |   8
 kernel/futex.c       |   7
 kernel/irq/chip.c    |   2
 kernel/irq/handle.c  |   4
 kernel/irq/manage.c  |   9
 kernel/kmod.c        |   8
 kernel/lockdep.c     |   3
 kernel/module.c      |   4
 kernel/power/disk.c  |  37
 kernel/printk.c      |  21
 kernel/signal.c      |  15
 kernel/spinlock.c    |  21
 kernel/sys_ni.c      |   1
 kernel/sysctl.c      |  30
 kernel/taskstats.c   |  87
 kernel/time/ntp.c    |   2
 kernel/tsacct.c      |  17
 kernel/unwind.c      | 327
 kernel/user.c        |  11
 kernel/workqueue.c   |   6
 24 files changed, 538 insertions, 147 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 75573e5d27b0..6952dd057300 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -678,7 +678,7 @@ int get_compat_sigevent(struct sigevent *event,
678 | ? -EFAULT : 0; | 678 | ? -EFAULT : 0; |
679 | } | 679 | } |
680 | 680 | ||
681 | long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask, | 681 | long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, |
682 | unsigned long bitmap_size) | 682 | unsigned long bitmap_size) |
683 | { | 683 | { |
684 | int i, j; | 684 | int i, j; |
@@ -982,4 +982,37 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
982 | } | 982 | } |
983 | return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); | 983 | return sys_move_pages(pid, nr_pages, pages, nodes, status, flags); |
984 | } | 984 | } |
985 | |||
986 | asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, | ||
987 | compat_ulong_t maxnode, | ||
988 | const compat_ulong_t __user *old_nodes, | ||
989 | const compat_ulong_t __user *new_nodes) | ||
990 | { | ||
991 | unsigned long __user *old = NULL; | ||
992 | unsigned long __user *new = NULL; | ||
993 | nodemask_t tmp_mask; | ||
994 | unsigned long nr_bits; | ||
995 | unsigned long size; | ||
996 | |||
997 | nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES); | ||
998 | size = ALIGN(nr_bits, BITS_PER_LONG) / 8; | ||
999 | if (old_nodes) { | ||
1000 | if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits)) | ||
1001 | return -EFAULT; | ||
1002 | old = compat_alloc_user_space(new_nodes ? size * 2 : size); | ||
1003 | if (new_nodes) | ||
1004 | new = old + size / sizeof(unsigned long); | ||
1005 | if (copy_to_user(old, nodes_addr(tmp_mask), size)) | ||
1006 | return -EFAULT; | ||
1007 | } | ||
1008 | if (new_nodes) { | ||
1009 | if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits)) | ||
1010 | return -EFAULT; | ||
1011 | if (new == NULL) | ||
1012 | new = compat_alloc_user_space(size); | ||
1013 | if (copy_to_user(new, nodes_addr(tmp_mask), size)) | ||
1014 | return -EFAULT; | ||
1015 | } | ||
1016 | return sys_migrate_pages(pid, nr_bits + 1, old, new); | ||
1017 | } | ||
985 | #endif | 1018 | #endif |
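For orientation, the compat wrapper added above only repacks 32-bit node masks and then calls sys_migrate_pages() with native-width copies. A minimal userspace sketch of the call it services is shown below; the node numbers, the mask width, and the use of libnuma's <numaif.h> declaration are illustrative assumptions, not part of this patch.

/*
 * Hypothetical example (not from this patch): move a process's pages
 * from node 0 to node 1 via migrate_pages(2).  Build with -lnuma for
 * the migrate_pages() wrapper declared in <numaif.h>.
 */
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	int pid = argc > 1 ? atoi(argv[1]) : 0;   /* 0 means the calling process */
	unsigned long old_nodes = 1UL << 0;       /* source: node 0 */
	unsigned long new_nodes = 1UL << 1;       /* destination: node 1 */
	/* one word's worth of bits; the kernel uses maxnode - 1 of them */
	unsigned long maxnode = 8 * sizeof(unsigned long);

	if (migrate_pages(pid, maxnode, &old_nodes, &new_nodes) < 0) {
		perror("migrate_pages");
		return 1;
	}
	return 0;
}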
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 27dd3ee47099..272254f20d97 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,8 +58,8 @@ void unlock_cpu_hotplug(void)
58 | recursive_depth--; | 58 | recursive_depth--; |
59 | return; | 59 | return; |
60 | } | 60 | } |
61 | mutex_unlock(&cpu_bitmask_lock); | ||
62 | recursive = NULL; | 61 | recursive = NULL; |
62 | mutex_unlock(&cpu_bitmask_lock); | ||
63 | } | 63 | } |
64 | EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); | 64 | EXPORT_SYMBOL_GPL(unlock_cpu_hotplug); |
65 | 65 | ||
@@ -150,18 +150,18 @@ static int _cpu_down(unsigned int cpu)
150 | p = __stop_machine_run(take_cpu_down, NULL, cpu); | 150 | p = __stop_machine_run(take_cpu_down, NULL, cpu); |
151 | mutex_unlock(&cpu_bitmask_lock); | 151 | mutex_unlock(&cpu_bitmask_lock); |
152 | 152 | ||
153 | if (IS_ERR(p)) { | 153 | if (IS_ERR(p) || cpu_online(cpu)) { |
154 | /* CPU didn't die: tell everyone. Can't complain. */ | 154 | /* CPU didn't die: tell everyone. Can't complain. */ |
155 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, | 155 | if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED, |
156 | (void *)(long)cpu) == NOTIFY_BAD) | 156 | (void *)(long)cpu) == NOTIFY_BAD) |
157 | BUG(); | 157 | BUG(); |
158 | 158 | ||
159 | err = PTR_ERR(p); | 159 | if (IS_ERR(p)) { |
160 | goto out_allowed; | 160 | err = PTR_ERR(p); |
161 | } | 161 | goto out_allowed; |
162 | 162 | } | |
163 | if (cpu_online(cpu)) | ||
164 | goto out_thread; | 163 | goto out_thread; |
164 | } | ||
165 | 165 | ||
166 | /* Wait for it to sleep (leaving idle task). */ | 166 | /* Wait for it to sleep (leaving idle task). */ |
167 | while (!idle_cpu(cpu)) | 167 | while (!idle_cpu(cpu)) |
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 36752f124c6a..66a0ea48751d 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -66,6 +66,7 @@ static void delayacct_end(struct timespec *start, struct timespec *end,
66 | { | 66 | { |
67 | struct timespec ts; | 67 | struct timespec ts; |
68 | s64 ns; | 68 | s64 ns; |
69 | unsigned long flags; | ||
69 | 70 | ||
70 | do_posix_clock_monotonic_gettime(end); | 71 | do_posix_clock_monotonic_gettime(end); |
71 | ts = timespec_sub(*end, *start); | 72 | ts = timespec_sub(*end, *start); |
@@ -73,10 +74,10 @@ static void delayacct_end(struct timespec *start, struct timespec *end,
73 | if (ns < 0) | 74 | if (ns < 0) |
74 | return; | 75 | return; |
75 | 76 | ||
76 | spin_lock(¤t->delays->lock); | 77 | spin_lock_irqsave(¤t->delays->lock, flags); |
77 | *total += ns; | 78 | *total += ns; |
78 | (*count)++; | 79 | (*count)++; |
79 | spin_unlock(¤t->delays->lock); | 80 | spin_unlock_irqrestore(¤t->delays->lock, flags); |
80 | } | 81 | } |
81 | 82 | ||
82 | void __delayacct_blkio_start(void) | 83 | void __delayacct_blkio_start(void) |
@@ -104,6 +105,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
104 | s64 tmp; | 105 | s64 tmp; |
105 | struct timespec ts; | 106 | struct timespec ts; |
106 | unsigned long t1,t2,t3; | 107 | unsigned long t1,t2,t3; |
108 | unsigned long flags; | ||
107 | 109 | ||
108 | /* Though tsk->delays accessed later, early exit avoids | 110 | /* Though tsk->delays accessed later, early exit avoids |
109 | * unnecessary returning of other data | 111 | * unnecessary returning of other data |
@@ -136,14 +138,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
136 | 138 | ||
137 | /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ | 139 | /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ |
138 | 140 | ||
139 | spin_lock(&tsk->delays->lock); | 141 | spin_lock_irqsave(&tsk->delays->lock, flags); |
140 | tmp = d->blkio_delay_total + tsk->delays->blkio_delay; | 142 | tmp = d->blkio_delay_total + tsk->delays->blkio_delay; |
141 | d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; | 143 | d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; |
142 | tmp = d->swapin_delay_total + tsk->delays->swapin_delay; | 144 | tmp = d->swapin_delay_total + tsk->delays->swapin_delay; |
143 | d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; | 145 | d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; |
144 | d->blkio_count += tsk->delays->blkio_count; | 146 | d->blkio_count += tsk->delays->blkio_count; |
145 | d->swapin_count += tsk->delays->swapin_count; | 147 | d->swapin_count += tsk->delays->swapin_count; |
146 | spin_unlock(&tsk->delays->lock); | 148 | spin_unlock_irqrestore(&tsk->delays->lock, flags); |
147 | 149 | ||
148 | done: | 150 | done: |
149 | return 0; | 151 | return 0; |
@@ -152,11 +154,12 @@ done:
152 | __u64 __delayacct_blkio_ticks(struct task_struct *tsk) | 154 | __u64 __delayacct_blkio_ticks(struct task_struct *tsk) |
153 | { | 155 | { |
154 | __u64 ret; | 156 | __u64 ret; |
157 | unsigned long flags; | ||
155 | 158 | ||
156 | spin_lock(&tsk->delays->lock); | 159 | spin_lock_irqsave(&tsk->delays->lock, flags); |
157 | ret = nsec_to_clock_t(tsk->delays->blkio_delay + | 160 | ret = nsec_to_clock_t(tsk->delays->blkio_delay + |
158 | tsk->delays->swapin_delay); | 161 | tsk->delays->swapin_delay); |
159 | spin_unlock(&tsk->delays->lock); | 162 | spin_unlock_irqrestore(&tsk->delays->lock, flags); |
160 | return ret; | 163 | return ret; |
161 | } | 164 | } |
162 | 165 | ||
diff --git a/kernel/exit.c b/kernel/exit.c
index f250a5e3e281..06de6c4e8ca3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -128,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
128 | flush_sigqueue(&tsk->pending); | 128 | flush_sigqueue(&tsk->pending); |
129 | if (sig) { | 129 | if (sig) { |
130 | flush_sigqueue(&sig->shared_pending); | 130 | flush_sigqueue(&sig->shared_pending); |
131 | taskstats_tgid_free(sig); | ||
131 | __cleanup_signal(sig); | 132 | __cleanup_signal(sig); |
132 | } | 133 | } |
133 | } | 134 | } |
diff --git a/kernel/fork.c b/kernel/fork.c
index 29ebb30850ed..8cdd3e72ba55 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -830,7 +830,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
830 | if (clone_flags & CLONE_THREAD) { | 830 | if (clone_flags & CLONE_THREAD) { |
831 | atomic_inc(¤t->signal->count); | 831 | atomic_inc(¤t->signal->count); |
832 | atomic_inc(¤t->signal->live); | 832 | atomic_inc(¤t->signal->live); |
833 | taskstats_tgid_alloc(current->signal); | 833 | taskstats_tgid_alloc(current); |
834 | return 0; | 834 | return 0; |
835 | } | 835 | } |
836 | sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); | 836 | sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); |
@@ -897,7 +897,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
897 | void __cleanup_signal(struct signal_struct *sig) | 897 | void __cleanup_signal(struct signal_struct *sig) |
898 | { | 898 | { |
899 | exit_thread_group_keys(sig); | 899 | exit_thread_group_keys(sig); |
900 | taskstats_tgid_free(sig); | ||
901 | kmem_cache_free(signal_cachep, sig); | 900 | kmem_cache_free(signal_cachep, sig); |
902 | } | 901 | } |
903 | 902 | ||
@@ -1316,9 +1315,8 @@ struct task_struct * __devinit fork_idle(int cpu)
1316 | struct pt_regs regs; | 1315 | struct pt_regs regs; |
1317 | 1316 | ||
1318 | task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, NULL, 0); | 1317 | task = copy_process(CLONE_VM, 0, idle_regs(®s), 0, NULL, NULL, 0); |
1319 | if (!task) | 1318 | if (!IS_ERR(task)) |
1320 | return ERR_PTR(-ENOMEM); | 1319 | init_idle(task, cpu); |
1321 | init_idle(task, cpu); | ||
1322 | 1320 | ||
1323 | return task; | 1321 | return task; |
1324 | } | 1322 | } |
diff --git a/kernel/futex.c b/kernel/futex.c
index b364e0026191..93ef30ba209f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1507,6 +1507,13 @@ static int futex_fd(u32 __user *uaddr, int signal)
1507 | struct futex_q *q; | 1507 | struct futex_q *q; |
1508 | struct file *filp; | 1508 | struct file *filp; |
1509 | int ret, err; | 1509 | int ret, err; |
1510 | static unsigned long printk_interval; | ||
1511 | |||
1512 | if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) { | ||
1513 | printk(KERN_WARNING "Process `%s' used FUTEX_FD, which " | ||
1514 | "will be removed from the kernel in June 2007\n", | ||
1515 | current->comm); | ||
1516 | } | ||
1510 | 1517 | ||
1511 | ret = -EINVAL; | 1518 | ret = -EINVAL; |
1512 | if (!valid_signal(signal)) | 1519 | if (!valid_signal(signal)) |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 2d0dc3efe813..ebfd24a41858 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -233,6 +233,8 @@ void irq_chip_set_defaults(struct irq_chip *chip)
233 | chip->shutdown = chip->disable; | 233 | chip->shutdown = chip->disable; |
234 | if (!chip->name) | 234 | if (!chip->name) |
235 | chip->name = chip->typename; | 235 | chip->name = chip->typename; |
236 | if (!chip->end) | ||
237 | chip->end = dummy_irq_chip.end; | ||
236 | } | 238 | } |
237 | 239 | ||
238 | static inline void mask_ack_irq(struct irq_desc *desc, int irq) | 240 | static inline void mask_ack_irq(struct irq_desc *desc, int irq) |
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 42aa6f1a3f0f..a681912bc89a 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -231,10 +231,10 @@ fastcall unsigned int __do_IRQ(unsigned int irq)
231 | spin_unlock(&desc->lock); | 231 | spin_unlock(&desc->lock); |
232 | 232 | ||
233 | action_ret = handle_IRQ_event(irq, action); | 233 | action_ret = handle_IRQ_event(irq, action); |
234 | |||
235 | spin_lock(&desc->lock); | ||
236 | if (!noirqdebug) | 234 | if (!noirqdebug) |
237 | note_interrupt(irq, desc, action_ret); | 235 | note_interrupt(irq, desc, action_ret); |
236 | |||
237 | spin_lock(&desc->lock); | ||
238 | if (likely(!(desc->status & IRQ_PENDING))) | 238 | if (likely(!(desc->status & IRQ_PENDING))) |
239 | break; | 239 | break; |
240 | desc->status &= ~IRQ_PENDING; | 240 | desc->status &= ~IRQ_PENDING; |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6879202afe9a..b385878c6e80 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -216,6 +216,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
216 | { | 216 | { |
217 | struct irq_desc *desc = irq_desc + irq; | 217 | struct irq_desc *desc = irq_desc + irq; |
218 | struct irqaction *old, **p; | 218 | struct irqaction *old, **p; |
219 | const char *old_name = NULL; | ||
219 | unsigned long flags; | 220 | unsigned long flags; |
220 | int shared = 0; | 221 | int shared = 0; |
221 | 222 | ||
@@ -255,8 +256,10 @@ int setup_irq(unsigned int irq, struct irqaction *new)
255 | * set the trigger type must match. | 256 | * set the trigger type must match. |
256 | */ | 257 | */ |
257 | if (!((old->flags & new->flags) & IRQF_SHARED) || | 258 | if (!((old->flags & new->flags) & IRQF_SHARED) || |
258 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) | 259 | ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) { |
260 | old_name = old->name; | ||
259 | goto mismatch; | 261 | goto mismatch; |
262 | } | ||
260 | 263 | ||
261 | #if defined(CONFIG_IRQ_PER_CPU) | 264 | #if defined(CONFIG_IRQ_PER_CPU) |
262 | /* All handlers must agree on per-cpuness */ | 265 | /* All handlers must agree on per-cpuness */ |
@@ -322,11 +325,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
322 | return 0; | 325 | return 0; |
323 | 326 | ||
324 | mismatch: | 327 | mismatch: |
325 | spin_unlock_irqrestore(&desc->lock, flags); | ||
326 | if (!(new->flags & IRQF_PROBE_SHARED)) { | 328 | if (!(new->flags & IRQF_PROBE_SHARED)) { |
327 | printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); | 329 | printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq); |
330 | if (old_name) | ||
331 | printk(KERN_ERR "current handler: %s\n", old_name); | ||
328 | dump_stack(); | 332 | dump_stack(); |
329 | } | 333 | } |
334 | spin_unlock_irqrestore(&desc->lock, flags); | ||
330 | return -EBUSY; | 335 | return -EBUSY; |
331 | } | 336 | } |
332 | 337 | ||
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bb4e29d924e4..2b76dee28496 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -307,14 +307,14 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
307 | return 0; | 307 | return 0; |
308 | 308 | ||
309 | f = create_write_pipe(); | 309 | f = create_write_pipe(); |
310 | if (!f) | 310 | if (IS_ERR(f)) |
311 | return -ENOMEM; | 311 | return PTR_ERR(f); |
312 | *filp = f; | 312 | *filp = f; |
313 | 313 | ||
314 | f = create_read_pipe(f); | 314 | f = create_read_pipe(f); |
315 | if (!f) { | 315 | if (IS_ERR(f)) { |
316 | free_write_pipe(*filp); | 316 | free_write_pipe(*filp); |
317 | return -ENOMEM; | 317 | return PTR_ERR(f); |
318 | } | 318 | } |
319 | sub_info.stdin = f; | 319 | sub_info.stdin = f; |
320 | 320 | ||
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b739be2a6dc9..c9fefdb1a7db 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1081,7 +1081,8 @@ static int static_obj(void *obj)
1081 | */ | 1081 | */ |
1082 | for_each_possible_cpu(i) { | 1082 | for_each_possible_cpu(i) { |
1083 | start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); | 1083 | start = (unsigned long) &__per_cpu_start + per_cpu_offset(i); |
1084 | end = (unsigned long) &__per_cpu_end + per_cpu_offset(i); | 1084 | end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM |
1085 | + per_cpu_offset(i); | ||
1085 | 1086 | ||
1086 | if ((addr >= start) && (addr < end)) | 1087 | if ((addr >= start) && (addr < end)) |
1087 | return 1; | 1088 | return 1; |
diff --git a/kernel/module.c b/kernel/module.c
index 67009bd56c52..f0166563c602 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1342,7 +1342,7 @@ static void set_license(struct module *mod, const char *license)
1342 | 1342 | ||
1343 | if (!license_is_gpl_compatible(license)) { | 1343 | if (!license_is_gpl_compatible(license)) { |
1344 | if (!(tainted & TAINT_PROPRIETARY_MODULE)) | 1344 | if (!(tainted & TAINT_PROPRIETARY_MODULE)) |
1345 | printk(KERN_WARNING "%s: module license '%s' taints" | 1345 | printk(KERN_WARNING "%s: module license '%s' taints " |
1346 | "kernel.\n", mod->name, license); | 1346 | "kernel.\n", mod->name, license); |
1347 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE); | 1347 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE); |
1348 | } | 1348 | } |
@@ -1718,7 +1718,7 @@ static struct module *load_module(void __user *umod,
1718 | set_license(mod, get_modinfo(sechdrs, infoindex, "license")); | 1718 | set_license(mod, get_modinfo(sechdrs, infoindex, "license")); |
1719 | 1719 | ||
1720 | if (strcmp(mod->name, "ndiswrapper") == 0) | 1720 | if (strcmp(mod->name, "ndiswrapper") == 0) |
1721 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE); | 1721 | add_taint(TAINT_PROPRIETARY_MODULE); |
1722 | if (strcmp(mod->name, "driverloader") == 0) | 1722 | if (strcmp(mod->name, "driverloader") == 0) |
1723 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE); | 1723 | add_taint_module(mod, TAINT_PROPRIETARY_MODULE); |
1724 | 1724 | ||
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d3a158a60312..b1fb7866b0b3 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,7 +71,7 @@ static inline void platform_finish(void)
71 | 71 | ||
72 | static int prepare_processes(void) | 72 | static int prepare_processes(void) |
73 | { | 73 | { |
74 | int error; | 74 | int error = 0; |
75 | 75 | ||
76 | pm_prepare_console(); | 76 | pm_prepare_console(); |
77 | 77 | ||
@@ -84,6 +84,12 @@ static int prepare_processes(void)
84 | goto thaw; | 84 | goto thaw; |
85 | } | 85 | } |
86 | 86 | ||
87 | if (pm_disk_mode == PM_DISK_TESTPROC) { | ||
88 | printk("swsusp debug: Waiting for 5 seconds.\n"); | ||
89 | mdelay(5000); | ||
90 | goto thaw; | ||
91 | } | ||
92 | |||
87 | /* Free memory before shutting down devices. */ | 93 | /* Free memory before shutting down devices. */ |
88 | if (!(error = swsusp_shrink_memory())) | 94 | if (!(error = swsusp_shrink_memory())) |
89 | return 0; | 95 | return 0; |
@@ -120,13 +126,21 @@ int pm_suspend_disk(void)
120 | if (error) | 126 | if (error) |
121 | return error; | 127 | return error; |
122 | 128 | ||
129 | if (pm_disk_mode == PM_DISK_TESTPROC) | ||
130 | goto Thaw; | ||
131 | |||
123 | suspend_console(); | 132 | suspend_console(); |
124 | error = device_suspend(PMSG_FREEZE); | 133 | error = device_suspend(PMSG_FREEZE); |
125 | if (error) { | 134 | if (error) { |
126 | resume_console(); | 135 | resume_console(); |
127 | printk("Some devices failed to suspend\n"); | 136 | printk("Some devices failed to suspend\n"); |
128 | unprepare_processes(); | 137 | goto Thaw; |
129 | return error; | 138 | } |
139 | |||
140 | if (pm_disk_mode == PM_DISK_TEST) { | ||
141 | printk("swsusp debug: Waiting for 5 seconds.\n"); | ||
142 | mdelay(5000); | ||
143 | goto Done; | ||
130 | } | 144 | } |
131 | 145 | ||
132 | pr_debug("PM: snapshotting memory.\n"); | 146 | pr_debug("PM: snapshotting memory.\n"); |
@@ -143,16 +157,17 @@ int pm_suspend_disk(void)
143 | power_down(pm_disk_mode); | 157 | power_down(pm_disk_mode); |
144 | else { | 158 | else { |
145 | swsusp_free(); | 159 | swsusp_free(); |
146 | unprepare_processes(); | 160 | goto Thaw; |
147 | return error; | ||
148 | } | 161 | } |
149 | } else | 162 | } else { |
150 | pr_debug("PM: Image restored successfully.\n"); | 163 | pr_debug("PM: Image restored successfully.\n"); |
164 | } | ||
151 | 165 | ||
152 | swsusp_free(); | 166 | swsusp_free(); |
153 | Done: | 167 | Done: |
154 | device_resume(); | 168 | device_resume(); |
155 | resume_console(); | 169 | resume_console(); |
170 | Thaw: | ||
156 | unprepare_processes(); | 171 | unprepare_processes(); |
157 | return error; | 172 | return error; |
158 | } | 173 | } |
@@ -249,6 +264,8 @@ static const char * const pm_disk_modes[] = {
249 | [PM_DISK_PLATFORM] = "platform", | 264 | [PM_DISK_PLATFORM] = "platform", |
250 | [PM_DISK_SHUTDOWN] = "shutdown", | 265 | [PM_DISK_SHUTDOWN] = "shutdown", |
251 | [PM_DISK_REBOOT] = "reboot", | 266 | [PM_DISK_REBOOT] = "reboot", |
267 | [PM_DISK_TEST] = "test", | ||
268 | [PM_DISK_TESTPROC] = "testproc", | ||
252 | }; | 269 | }; |
253 | 270 | ||
254 | /** | 271 | /** |
@@ -303,17 +320,19 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n)
303 | } | 320 | } |
304 | } | 321 | } |
305 | if (mode) { | 322 | if (mode) { |
306 | if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT) | 323 | if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT || |
324 | mode == PM_DISK_TEST || mode == PM_DISK_TESTPROC) { | ||
307 | pm_disk_mode = mode; | 325 | pm_disk_mode = mode; |
308 | else { | 326 | } else { |
309 | if (pm_ops && pm_ops->enter && | 327 | if (pm_ops && pm_ops->enter && |
310 | (mode == pm_ops->pm_disk_mode)) | 328 | (mode == pm_ops->pm_disk_mode)) |
311 | pm_disk_mode = mode; | 329 | pm_disk_mode = mode; |
312 | else | 330 | else |
313 | error = -EINVAL; | 331 | error = -EINVAL; |
314 | } | 332 | } |
315 | } else | 333 | } else { |
316 | error = -EINVAL; | 334 | error = -EINVAL; |
335 | } | ||
317 | 336 | ||
318 | pr_debug("PM: suspend-to-disk mode set to '%s'\n", | 337 | pr_debug("PM: suspend-to-disk mode set to '%s'\n", |
319 | pm_disk_modes[mode]); | 338 | pm_disk_modes[mode]); |
diff --git a/kernel/printk.c b/kernel/printk.c
index f7d427ef5038..66426552fbfe 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -31,6 +31,7 @@
31 | #include <linux/security.h> | 31 | #include <linux/security.h> |
32 | #include <linux/bootmem.h> | 32 | #include <linux/bootmem.h> |
33 | #include <linux/syscalls.h> | 33 | #include <linux/syscalls.h> |
34 | #include <linux/jiffies.h> | ||
34 | 35 | ||
35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
36 | 37 | ||
@@ -1101,3 +1102,23 @@ int printk_ratelimit(void)
1101 | printk_ratelimit_burst); | 1102 | printk_ratelimit_burst); |
1102 | } | 1103 | } |
1103 | EXPORT_SYMBOL(printk_ratelimit); | 1104 | EXPORT_SYMBOL(printk_ratelimit); |
1105 | |||
1106 | /** | ||
1107 | * printk_timed_ratelimit - caller-controlled printk ratelimiting | ||
1108 | * @caller_jiffies: pointer to caller's state | ||
1109 | * @interval_msecs: minimum interval between prints | ||
1110 | * | ||
1111 | * printk_timed_ratelimit() returns true if more than @interval_msecs | ||
1112 | * milliseconds have elapsed since the last time printk_timed_ratelimit() | ||
1113 | * returned true. | ||
1114 | */ | ||
1115 | bool printk_timed_ratelimit(unsigned long *caller_jiffies, | ||
1116 | unsigned int interval_msecs) | ||
1117 | { | ||
1118 | if (*caller_jiffies == 0 || time_after(jiffies, *caller_jiffies)) { | ||
1119 | *caller_jiffies = jiffies + msecs_to_jiffies(interval_msecs); | ||
1120 | return true; | ||
1121 | } | ||
1122 | return false; | ||
1123 | } | ||
1124 | EXPORT_SYMBOL(printk_timed_ratelimit); | ||
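The kernel-doc above gives the contract for the new helper; the sketch below shows the intended call-site pattern (the same one the FUTEX_FD warning in kernel/futex.c adopts in this series), with one static jiffies value per message site. The driver name and message are made up for illustration.

#include <linux/kernel.h>

/* Hypothetical call site: warn at most once every 5000 milliseconds. */
static void my_driver_report_overrun(void)
{
	static unsigned long last_warned;	/* zero => print on the first call */

	if (printk_timed_ratelimit(&last_warned, 5000))
		printk(KERN_WARNING "my_driver: queue overrun, dropping data\n");
}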
diff --git a/kernel/signal.c b/kernel/signal.c
index 7ed8d5304bec..df18c167a2a7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -267,18 +267,25 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
267 | int override_rlimit) | 267 | int override_rlimit) |
268 | { | 268 | { |
269 | struct sigqueue *q = NULL; | 269 | struct sigqueue *q = NULL; |
270 | struct user_struct *user; | ||
270 | 271 | ||
271 | atomic_inc(&t->user->sigpending); | 272 | /* |
273 | * In order to avoid problems with "switch_user()", we want to make | ||
274 | * sure that the compiler doesn't re-load "t->user" | ||
275 | */ | ||
276 | user = t->user; | ||
277 | barrier(); | ||
278 | atomic_inc(&user->sigpending); | ||
272 | if (override_rlimit || | 279 | if (override_rlimit || |
273 | atomic_read(&t->user->sigpending) <= | 280 | atomic_read(&user->sigpending) <= |
274 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) | 281 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) |
275 | q = kmem_cache_alloc(sigqueue_cachep, flags); | 282 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
276 | if (unlikely(q == NULL)) { | 283 | if (unlikely(q == NULL)) { |
277 | atomic_dec(&t->user->sigpending); | 284 | atomic_dec(&user->sigpending); |
278 | } else { | 285 | } else { |
279 | INIT_LIST_HEAD(&q->list); | 286 | INIT_LIST_HEAD(&q->list); |
280 | q->flags = 0; | 287 | q->flags = 0; |
281 | q->user = get_uid(t->user); | 288 | q->user = get_uid(user); |
282 | } | 289 | } |
283 | return(q); | 290 | return(q); |
284 | } | 291 | } |
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 476c3741511b..2c6c2bf85514 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -293,6 +293,27 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
293 | } | 293 | } |
294 | 294 | ||
295 | EXPORT_SYMBOL(_spin_lock_nested); | 295 | EXPORT_SYMBOL(_spin_lock_nested); |
296 | unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass) | ||
297 | { | ||
298 | unsigned long flags; | ||
299 | |||
300 | local_irq_save(flags); | ||
301 | preempt_disable(); | ||
302 | spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); | ||
303 | /* | ||
304 | * On lockdep we dont want the hand-coded irq-enable of | ||
305 | * _raw_spin_lock_flags() code, because lockdep assumes | ||
306 | * that interrupts are not re-enabled during lock-acquire: | ||
307 | */ | ||
308 | #ifdef CONFIG_PROVE_SPIN_LOCKING | ||
309 | _raw_spin_lock(lock); | ||
310 | #else | ||
311 | _raw_spin_lock_flags(lock, &flags); | ||
312 | #endif | ||
313 | return flags; | ||
314 | } | ||
315 | |||
316 | EXPORT_SYMBOL(_spin_lock_irqsave_nested); | ||
296 | 317 | ||
297 | #endif | 318 | #endif |
298 | 319 | ||
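A usage sketch for the new irqsave+nested primitive, assuming the matching spin_lock_irqsave_nested() wrapper macro in <linux/spinlock.h>; the structure and function below are illustrative only, showing a nested acquisition of a lock from the same lock class with interrupts disabled.

#include <linux/spinlock.h>

struct foo {
	spinlock_t lock;
	/* ... */
};

/*
 * The caller already holds the parent's lock, which belongs to the same
 * lock class; annotate the child acquisition so lockdep does not report
 * a false self-deadlock, while still disabling interrupts here.
 */
static void touch_child(struct foo *child)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&child->lock, flags, SINGLE_DEPTH_NESTING);
	/* ... update child state ... */
	spin_unlock_irqrestore(&child->lock, flags);
}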
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 0e53314b14de..d7306d0f3dfc 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -135,6 +135,7 @@ cond_syscall(sys_madvise);
135 | cond_syscall(sys_mremap); | 135 | cond_syscall(sys_mremap); |
136 | cond_syscall(sys_remap_file_pages); | 136 | cond_syscall(sys_remap_file_pages); |
137 | cond_syscall(compat_sys_move_pages); | 137 | cond_syscall(compat_sys_move_pages); |
138 | cond_syscall(compat_sys_migrate_pages); | ||
138 | 139 | ||
139 | /* block-layer dependent */ | 140 | /* block-layer dependent */ |
140 | cond_syscall(sys_bdflush); | 141 | cond_syscall(sys_bdflush); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8bff2c18fb5a..09e569f4792b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1315,7 +1315,9 @@ repeat:
1315 | return -ENOTDIR; | 1315 | return -ENOTDIR; |
1316 | if (get_user(n, name)) | 1316 | if (get_user(n, name)) |
1317 | return -EFAULT; | 1317 | return -EFAULT; |
1318 | for ( ; table->ctl_name; table++) { | 1318 | for ( ; table->ctl_name || table->procname; table++) { |
1319 | if (!table->ctl_name) | ||
1320 | continue; | ||
1319 | if (n == table->ctl_name || table->ctl_name == CTL_ANY) { | 1321 | if (n == table->ctl_name || table->ctl_name == CTL_ANY) { |
1320 | int error; | 1322 | int error; |
1321 | if (table->child) { | 1323 | if (table->child) { |
@@ -1532,7 +1534,7 @@ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root,
1532 | int len; | 1534 | int len; |
1533 | mode_t mode; | 1535 | mode_t mode; |
1534 | 1536 | ||
1535 | for (; table->ctl_name; table++) { | 1537 | for (; table->ctl_name || table->procname; table++) { |
1536 | /* Can't do anything without a proc name. */ | 1538 | /* Can't do anything without a proc name. */ |
1537 | if (!table->procname) | 1539 | if (!table->procname) |
1538 | continue; | 1540 | continue; |
@@ -1579,7 +1581,7 @@ static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root)
1579 | static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root) | 1581 | static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root) |
1580 | { | 1582 | { |
1581 | struct proc_dir_entry *de; | 1583 | struct proc_dir_entry *de; |
1582 | for (; table->ctl_name; table++) { | 1584 | for (; table->ctl_name || table->procname; table++) { |
1583 | if (!(de = table->de)) | 1585 | if (!(de = table->de)) |
1584 | continue; | 1586 | continue; |
1585 | if (de->mode & S_IFDIR) { | 1587 | if (de->mode & S_IFDIR) { |
@@ -2680,13 +2682,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
2680 | asmlinkage long sys_sysctl(struct __sysctl_args __user *args) | 2682 | asmlinkage long sys_sysctl(struct __sysctl_args __user *args) |
2681 | { | 2683 | { |
2682 | static int msg_count; | 2684 | static int msg_count; |
2685 | struct __sysctl_args tmp; | ||
2686 | int name[CTL_MAXNAME]; | ||
2687 | int i; | ||
2688 | |||
2689 | /* Read in the sysctl name for better debug message logging */ | ||
2690 | if (copy_from_user(&tmp, args, sizeof(tmp))) | ||
2691 | return -EFAULT; | ||
2692 | if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME) | ||
2693 | return -ENOTDIR; | ||
2694 | for (i = 0; i < tmp.nlen; i++) | ||
2695 | if (get_user(name[i], tmp.name + i)) | ||
2696 | return -EFAULT; | ||
2697 | |||
2698 | /* Ignore accesses to kernel.version */ | ||
2699 | if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION)) | ||
2700 | goto out; | ||
2683 | 2701 | ||
2684 | if (msg_count < 5) { | 2702 | if (msg_count < 5) { |
2685 | msg_count++; | 2703 | msg_count++; |
2686 | printk(KERN_INFO | 2704 | printk(KERN_INFO |
2687 | "warning: process `%s' used the removed sysctl " | 2705 | "warning: process `%s' used the removed sysctl " |
2688 | "system call\n", current->comm); | 2706 | "system call with ", current->comm); |
2707 | for (i = 0; i < tmp.nlen; i++) | ||
2708 | printk("%d.", name[i]); | ||
2709 | printk("\n"); | ||
2689 | } | 2710 | } |
2711 | out: | ||
2690 | return -ENOSYS; | 2712 | return -ENOSYS; |
2691 | } | 2713 | } |
2692 | 2714 | ||
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 5d6a8c54ee85..f45c5e70773c 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -77,7 +77,8 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
77 | /* | 77 | /* |
78 | * If new attributes are added, please revisit this allocation | 78 | * If new attributes are added, please revisit this allocation |
79 | */ | 79 | */ |
80 | skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL); | 80 | size = nlmsg_total_size(genlmsg_total_size(size)); |
81 | skb = nlmsg_new(size, GFP_KERNEL); | ||
81 | if (!skb) | 82 | if (!skb) |
82 | return -ENOMEM; | 83 | return -ENOMEM; |
83 | 84 | ||
@@ -174,21 +175,19 @@ static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
174 | up_write(&listeners->sem); | 175 | up_write(&listeners->sem); |
175 | } | 176 | } |
176 | 177 | ||
177 | static int fill_pid(pid_t pid, struct task_struct *pidtsk, | 178 | static int fill_pid(pid_t pid, struct task_struct *tsk, |
178 | struct taskstats *stats) | 179 | struct taskstats *stats) |
179 | { | 180 | { |
180 | int rc = 0; | 181 | int rc = 0; |
181 | struct task_struct *tsk = pidtsk; | ||
182 | 182 | ||
183 | if (!pidtsk) { | 183 | if (!tsk) { |
184 | read_lock(&tasklist_lock); | 184 | rcu_read_lock(); |
185 | tsk = find_task_by_pid(pid); | 185 | tsk = find_task_by_pid(pid); |
186 | if (!tsk) { | 186 | if (tsk) |
187 | read_unlock(&tasklist_lock); | 187 | get_task_struct(tsk); |
188 | rcu_read_unlock(); | ||
189 | if (!tsk) | ||
188 | return -ESRCH; | 190 | return -ESRCH; |
189 | } | ||
190 | get_task_struct(tsk); | ||
191 | read_unlock(&tasklist_lock); | ||
192 | } else | 191 | } else |
193 | get_task_struct(tsk); | 192 | get_task_struct(tsk); |
194 | 193 | ||
@@ -214,39 +213,30 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk,
214 | 213 | ||
215 | } | 214 | } |
216 | 215 | ||
217 | static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk, | 216 | static int fill_tgid(pid_t tgid, struct task_struct *first, |
218 | struct taskstats *stats) | 217 | struct taskstats *stats) |
219 | { | 218 | { |
220 | struct task_struct *tsk, *first; | 219 | struct task_struct *tsk; |
221 | unsigned long flags; | 220 | unsigned long flags; |
221 | int rc = -ESRCH; | ||
222 | 222 | ||
223 | /* | 223 | /* |
224 | * Add additional stats from live tasks except zombie thread group | 224 | * Add additional stats from live tasks except zombie thread group |
225 | * leaders who are already counted with the dead tasks | 225 | * leaders who are already counted with the dead tasks |
226 | */ | 226 | */ |
227 | first = tgidtsk; | 227 | rcu_read_lock(); |
228 | if (!first) { | 228 | if (!first) |
229 | read_lock(&tasklist_lock); | ||
230 | first = find_task_by_pid(tgid); | 229 | first = find_task_by_pid(tgid); |
231 | if (!first) { | ||
232 | read_unlock(&tasklist_lock); | ||
233 | return -ESRCH; | ||
234 | } | ||
235 | get_task_struct(first); | ||
236 | read_unlock(&tasklist_lock); | ||
237 | } else | ||
238 | get_task_struct(first); | ||
239 | 230 | ||
240 | /* Start with stats from dead tasks */ | 231 | if (!first || !lock_task_sighand(first, &flags)) |
241 | spin_lock_irqsave(&first->signal->stats_lock, flags); | 232 | goto out; |
233 | |||
242 | if (first->signal->stats) | 234 | if (first->signal->stats) |
243 | memcpy(stats, first->signal->stats, sizeof(*stats)); | 235 | memcpy(stats, first->signal->stats, sizeof(*stats)); |
244 | spin_unlock_irqrestore(&first->signal->stats_lock, flags); | ||
245 | 236 | ||
246 | tsk = first; | 237 | tsk = first; |
247 | read_lock(&tasklist_lock); | ||
248 | do { | 238 | do { |
249 | if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk)) | 239 | if (tsk->exit_state) |
250 | continue; | 240 | continue; |
251 | /* | 241 | /* |
252 | * Accounting subsystem can call its functions here to | 242 | * Accounting subsystem can call its functions here to |
@@ -257,15 +247,18 @@ static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
257 | delayacct_add_tsk(stats, tsk); | 247 | delayacct_add_tsk(stats, tsk); |
258 | 248 | ||
259 | } while_each_thread(first, tsk); | 249 | } while_each_thread(first, tsk); |
260 | read_unlock(&tasklist_lock); | ||
261 | stats->version = TASKSTATS_VERSION; | ||
262 | 250 | ||
251 | unlock_task_sighand(first, &flags); | ||
252 | rc = 0; | ||
253 | out: | ||
254 | rcu_read_unlock(); | ||
255 | |||
256 | stats->version = TASKSTATS_VERSION; | ||
263 | /* | 257 | /* |
264 | * Accounting subsytems can also add calls here to modify | 258 | * Accounting subsytems can also add calls here to modify |
265 | * fields of taskstats. | 259 | * fields of taskstats. |
266 | */ | 260 | */ |
267 | 261 | return rc; | |
268 | return 0; | ||
269 | } | 262 | } |
270 | 263 | ||
271 | 264 | ||
@@ -273,7 +266,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
273 | { | 266 | { |
274 | unsigned long flags; | 267 | unsigned long flags; |
275 | 268 | ||
276 | spin_lock_irqsave(&tsk->signal->stats_lock, flags); | 269 | spin_lock_irqsave(&tsk->sighand->siglock, flags); |
277 | if (!tsk->signal->stats) | 270 | if (!tsk->signal->stats) |
278 | goto ret; | 271 | goto ret; |
279 | 272 | ||
@@ -285,7 +278,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
285 | */ | 278 | */ |
286 | delayacct_add_tsk(tsk->signal->stats, tsk); | 279 | delayacct_add_tsk(tsk->signal->stats, tsk); |
287 | ret: | 280 | ret: |
288 | spin_unlock_irqrestore(&tsk->signal->stats_lock, flags); | 281 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); |
289 | return; | 282 | return; |
290 | } | 283 | } |
291 | 284 | ||
@@ -419,7 +412,7 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
419 | return send_reply(rep_skb, info->snd_pid); | 412 | return send_reply(rep_skb, info->snd_pid); |
420 | 413 | ||
421 | nla_put_failure: | 414 | nla_put_failure: |
422 | return genlmsg_cancel(rep_skb, reply); | 415 | rc = genlmsg_cancel(rep_skb, reply); |
423 | err: | 416 | err: |
424 | nlmsg_free(rep_skb); | 417 | nlmsg_free(rep_skb); |
425 | return rc; | 418 | return rc; |
@@ -461,24 +454,26 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
461 | size_t size; | 454 | size_t size; |
462 | int is_thread_group; | 455 | int is_thread_group; |
463 | struct nlattr *na; | 456 | struct nlattr *na; |
464 | unsigned long flags; | ||
465 | 457 | ||
466 | if (!family_registered || !tidstats) | 458 | if (!family_registered) |
467 | return; | 459 | return; |
468 | 460 | ||
469 | spin_lock_irqsave(&tsk->signal->stats_lock, flags); | ||
470 | is_thread_group = tsk->signal->stats ? 1 : 0; | ||
471 | spin_unlock_irqrestore(&tsk->signal->stats_lock, flags); | ||
472 | |||
473 | rc = 0; | ||
474 | /* | 461 | /* |
475 | * Size includes space for nested attributes | 462 | * Size includes space for nested attributes |
476 | */ | 463 | */ |
477 | size = nla_total_size(sizeof(u32)) + | 464 | size = nla_total_size(sizeof(u32)) + |
478 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); | 465 | nla_total_size(sizeof(struct taskstats)) + nla_total_size(0); |
479 | 466 | ||
480 | if (is_thread_group) | 467 | is_thread_group = (tsk->signal->stats != NULL); |
481 | size = 2 * size; /* PID + STATS + TGID + STATS */ | 468 | if (is_thread_group) { |
469 | /* PID + STATS + TGID + STATS */ | ||
470 | size = 2 * size; | ||
471 | /* fill the tsk->signal->stats structure */ | ||
472 | fill_tgid_exit(tsk); | ||
473 | } | ||
474 | |||
475 | if (!tidstats) | ||
476 | return; | ||
482 | 477 | ||
483 | rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); | 478 | rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size); |
484 | if (rc < 0) | 479 | if (rc < 0) |
@@ -498,11 +493,8 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
498 | goto send; | 493 | goto send; |
499 | 494 | ||
500 | /* | 495 | /* |
501 | * tsk has/had a thread group so fill the tsk->signal->stats structure | ||
502 | * Doesn't matter if tsk is the leader or the last group member leaving | 496 | * Doesn't matter if tsk is the leader or the last group member leaving |
503 | */ | 497 | */ |
504 | |||
505 | fill_tgid_exit(tsk); | ||
506 | if (!group_dead) | 498 | if (!group_dead) |
507 | goto send; | 499 | goto send; |
508 | 500 | ||
@@ -519,7 +511,6 @@ send:
519 | 511 | ||
520 | nla_put_failure: | 512 | nla_put_failure: |
521 | genlmsg_cancel(rep_skb, reply); | 513 | genlmsg_cancel(rep_skb, reply); |
522 | goto ret; | ||
523 | err_skb: | 514 | err_skb: |
524 | nlmsg_free(rep_skb); | 515 | nlmsg_free(rep_skb); |
525 | ret: | 516 | ret: |
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 47195fa0ec4f..3afeaa3a73f9 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -161,9 +161,9 @@ void second_overflow(void)
161 | time_adjust += MAX_TICKADJ; | 161 | time_adjust += MAX_TICKADJ; |
162 | tick_length -= MAX_TICKADJ_SCALED; | 162 | tick_length -= MAX_TICKADJ_SCALED; |
163 | } else { | 163 | } else { |
164 | time_adjust = 0; | ||
165 | tick_length += (s64)(time_adjust * NSEC_PER_USEC / | 164 | tick_length += (s64)(time_adjust * NSEC_PER_USEC / |
166 | HZ) << TICK_LENGTH_SHIFT; | 165 | HZ) << TICK_LENGTH_SHIFT; |
166 | time_adjust = 0; | ||
167 | } | 167 | } |
168 | } | 168 | } |
169 | } | 169 | } |
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index db443221ba5b..96f77013d3f0 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -36,7 +36,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
36 | 36 | ||
37 | /* calculate task elapsed time in timespec */ | 37 | /* calculate task elapsed time in timespec */ |
38 | do_posix_clock_monotonic_gettime(&uptime); | 38 | do_posix_clock_monotonic_gettime(&uptime); |
39 | ts = timespec_sub(uptime, current->group_leader->start_time); | 39 | ts = timespec_sub(uptime, tsk->start_time); |
40 | /* rebase elapsed time to usec */ | 40 | /* rebase elapsed time to usec */ |
41 | ac_etime = timespec_to_ns(&ts); | 41 | ac_etime = timespec_to_ns(&ts); |
42 | do_div(ac_etime, NSEC_PER_USEC); | 42 | do_div(ac_etime, NSEC_PER_USEC); |
@@ -58,7 +58,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
58 | stats->ac_uid = tsk->uid; | 58 | stats->ac_uid = tsk->uid; |
59 | stats->ac_gid = tsk->gid; | 59 | stats->ac_gid = tsk->gid; |
60 | stats->ac_pid = tsk->pid; | 60 | stats->ac_pid = tsk->pid; |
61 | stats->ac_ppid = (tsk->parent) ? tsk->parent->pid : 0; | 61 | rcu_read_lock(); |
62 | stats->ac_ppid = pid_alive(tsk) ? | ||
63 | rcu_dereference(tsk->real_parent)->tgid : 0; | ||
64 | rcu_read_unlock(); | ||
62 | stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; | 65 | stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; |
63 | stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; | 66 | stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; |
64 | stats->ac_minflt = tsk->min_flt; | 67 | stats->ac_minflt = tsk->min_flt; |
@@ -77,13 +80,17 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
77 | */ | 80 | */ |
78 | void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) | 81 | void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) |
79 | { | 82 | { |
83 | struct mm_struct *mm; | ||
84 | |||
80 | /* convert pages-jiffies to Mbyte-usec */ | 85 | /* convert pages-jiffies to Mbyte-usec */ |
81 | stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; | 86 | stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; |
82 | stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; | 87 | stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; |
83 | if (p->mm) { | 88 | mm = get_task_mm(p); |
89 | if (mm) { | ||
84 | /* adjust to KB unit */ | 90 | /* adjust to KB unit */ |
85 | stats->hiwater_rss = p->mm->hiwater_rss * PAGE_SIZE / KB; | 91 | stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB; |
86 | stats->hiwater_vm = p->mm->hiwater_vm * PAGE_SIZE / KB; | 92 | stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB; |
93 | mmput(mm); | ||
87 | } | 94 | } |
88 | stats->read_char = p->rchar; | 95 | stats->read_char = p->rchar; |
89 | stats->write_char = p->wchar; | 96 | stats->write_char = p->wchar; |
diff --git a/kernel/unwind.c b/kernel/unwind.c
index 2e2368607aab..ed0a21d4a902 100644
--- a/kernel/unwind.c
+++ b/kernel/unwind.c
@@ -11,13 +11,15 @@
11 | 11 | ||
12 | #include <linux/unwind.h> | 12 | #include <linux/unwind.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <linux/delay.h> | 14 | #include <linux/bootmem.h> |
15 | #include <linux/sort.h> | ||
15 | #include <linux/stop_machine.h> | 16 | #include <linux/stop_machine.h> |
16 | #include <asm/sections.h> | 17 | #include <asm/sections.h> |
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <asm/unaligned.h> | 19 | #include <asm/unaligned.h> |
19 | 20 | ||
20 | extern char __start_unwind[], __end_unwind[]; | 21 | extern char __start_unwind[], __end_unwind[]; |
22 | extern const u8 __start_unwind_hdr[], __end_unwind_hdr[]; | ||
21 | 23 | ||
22 | #define MAX_STACK_DEPTH 8 | 24 | #define MAX_STACK_DEPTH 8 |
23 | 25 | ||
@@ -100,6 +102,8 @@ static struct unwind_table {
100 | } core, init; | 102 | } core, init; |
101 | const void *address; | 103 | const void *address; |
102 | unsigned long size; | 104 | unsigned long size; |
105 | const unsigned char *header; | ||
106 | unsigned long hdrsz; | ||
103 | struct unwind_table *link; | 107 | struct unwind_table *link; |
104 | const char *name; | 108 | const char *name; |
105 | } root_table; | 109 | } root_table; |
@@ -145,6 +149,10 @@ static struct unwind_table *find_table(unsigned long pc)
145 | return table; | 149 | return table; |
146 | } | 150 | } |
147 | 151 | ||
152 | static unsigned long read_pointer(const u8 **pLoc, | ||
153 | const void *end, | ||
154 | signed ptrType); | ||
155 | |||
148 | static void init_unwind_table(struct unwind_table *table, | 156 | static void init_unwind_table(struct unwind_table *table, |
149 | const char *name, | 157 | const char *name, |
150 | const void *core_start, | 158 | const void *core_start, |
@@ -152,14 +160,30 @@ static void init_unwind_table(struct unwind_table *table,
152 | const void *init_start, | 160 | const void *init_start, |
153 | unsigned long init_size, | 161 | unsigned long init_size, |
154 | const void *table_start, | 162 | const void *table_start, |
155 | unsigned long table_size) | 163 | unsigned long table_size, |
164 | const u8 *header_start, | ||
165 | unsigned long header_size) | ||
156 | { | 166 | { |
167 | const u8 *ptr = header_start + 4; | ||
168 | const u8 *end = header_start + header_size; | ||
169 | |||
157 | table->core.pc = (unsigned long)core_start; | 170 | table->core.pc = (unsigned long)core_start; |
158 | table->core.range = core_size; | 171 | table->core.range = core_size; |
159 | table->init.pc = (unsigned long)init_start; | 172 | table->init.pc = (unsigned long)init_start; |
160 | table->init.range = init_size; | 173 | table->init.range = init_size; |
161 | table->address = table_start; | 174 | table->address = table_start; |
162 | table->size = table_size; | 175 | table->size = table_size; |
176 | /* See if the linker provided table looks valid. */ | ||
177 | if (header_size <= 4 | ||
178 | || header_start[0] != 1 | ||
179 | || (void *)read_pointer(&ptr, end, header_start[1]) != table_start | ||
180 | || header_start[2] == DW_EH_PE_omit | ||
181 | || read_pointer(&ptr, end, header_start[2]) <= 0 | ||
182 | || header_start[3] == DW_EH_PE_omit) | ||
183 | header_start = NULL; | ||
184 | table->hdrsz = header_size; | ||
185 | smp_wmb(); | ||
186 | table->header = header_start; | ||
163 | table->link = NULL; | 187 | table->link = NULL; |
164 | table->name = name; | 188 | table->name = name; |
165 | } | 189 | } |
@@ -169,7 +193,143 @@ void __init unwind_init(void)
169 | init_unwind_table(&root_table, "kernel", | 193 | init_unwind_table(&root_table, "kernel", |
170 | _text, _end - _text, | 194 | _text, _end - _text, |
171 | NULL, 0, | 195 | NULL, 0, |
172 | __start_unwind, __end_unwind - __start_unwind); | 196 | __start_unwind, __end_unwind - __start_unwind, |
197 | __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr); | ||
198 | } | ||
199 | |||
200 | static const u32 bad_cie, not_fde; | ||
201 | static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *); | ||
202 | static signed fde_pointer_type(const u32 *cie); | ||
203 | |||
204 | struct eh_frame_hdr_table_entry { | ||
205 | unsigned long start, fde; | ||
206 | }; | ||
207 | |||
208 | static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2) | ||
209 | { | ||
210 | const struct eh_frame_hdr_table_entry *e1 = p1; | ||
211 | const struct eh_frame_hdr_table_entry *e2 = p2; | ||
212 | |||
213 | return (e1->start > e2->start) - (e1->start < e2->start); | ||
214 | } | ||
215 | |||
216 | static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size) | ||
217 | { | ||
218 | struct eh_frame_hdr_table_entry *e1 = p1; | ||
219 | struct eh_frame_hdr_table_entry *e2 = p2; | ||
220 | unsigned long v; | ||
221 | |||
222 | v = e1->start; | ||
223 | e1->start = e2->start; | ||
224 | e2->start = v; | ||
225 | v = e1->fde; | ||
226 | e1->fde = e2->fde; | ||
227 | e2->fde = v; | ||
228 | } | ||
229 | |||
230 | static void __init setup_unwind_table(struct unwind_table *table, | ||
231 | void *(*alloc)(unsigned long)) | ||
232 | { | ||
233 | const u8 *ptr; | ||
234 | unsigned long tableSize = table->size, hdrSize; | ||
235 | unsigned n; | ||
236 | const u32 *fde; | ||
237 | struct { | ||
238 | u8 version; | ||
239 | u8 eh_frame_ptr_enc; | ||
240 | u8 fde_count_enc; | ||
241 | u8 table_enc; | ||
242 | unsigned long eh_frame_ptr; | ||
243 | unsigned int fde_count; | ||
244 | struct eh_frame_hdr_table_entry table[]; | ||
245 | } __attribute__((__packed__)) *header; | ||
246 | |||
247 | if (table->header) | ||
248 | return; | ||
249 | |||
250 | if (table->hdrsz) | ||
251 | printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n", | ||
252 | table->name); | ||
253 | |||
254 | if (tableSize & (sizeof(*fde) - 1)) | ||
255 | return; | ||
256 | |||
257 | for (fde = table->address, n = 0; | ||
258 | tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; | ||
259 | tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { | ||
260 | const u32 *cie = cie_for_fde(fde, table); | ||
261 | signed ptrType; | ||
262 | |||
263 | if (cie == ¬_fde) | ||
264 | continue; | ||
265 | if (cie == NULL | ||
266 | || cie == &bad_cie | ||
267 | || (ptrType = fde_pointer_type(cie)) < 0) | ||
268 | return; | ||
269 | ptr = (const u8 *)(fde + 2); | ||
270 | if (!read_pointer(&ptr, | ||
271 | (const u8 *)(fde + 1) + *fde, | ||
272 | ptrType)) | ||
273 | return; | ||
274 | ++n; | ||
275 | } | ||
276 | |||
277 | if (tableSize || !n) | ||
278 | return; | ||
279 | |||
280 | hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int) | ||
281 | + 2 * n * sizeof(unsigned long); | ||
282 | header = alloc(hdrSize); | ||
283 | if (!header) | ||
284 | return; | ||
285 | header->version = 1; | ||
286 | header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native; | ||
287 | header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4; | ||
288 | header->table_enc = DW_EH_PE_abs|DW_EH_PE_native; | ||
289 | put_unaligned((unsigned long)table->address, &header->eh_frame_ptr); | ||
290 | BUILD_BUG_ON(offsetof(typeof(*header), fde_count) | ||
291 | % __alignof(typeof(header->fde_count))); | ||
292 | header->fde_count = n; | ||
293 | |||
294 | BUILD_BUG_ON(offsetof(typeof(*header), table) | ||
295 | % __alignof(typeof(*header->table))); | ||
296 | for (fde = table->address, tableSize = table->size, n = 0; | ||
297 | tableSize; | ||
298 | tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) { | ||
299 | const u32 *cie = fde + 1 - fde[1] / sizeof(*fde); | ||
300 | |||
301 | if (!fde[1]) | ||
302 | continue; /* this is a CIE */ | ||
303 | ptr = (const u8 *)(fde + 2); | ||
304 | header->table[n].start = read_pointer(&ptr, | ||
305 | (const u8 *)(fde + 1) + *fde, | ||
306 | fde_pointer_type(cie)); | ||
307 | header->table[n].fde = (unsigned long)fde; | ||
308 | ++n; | ||
309 | } | ||
310 | WARN_ON(n != header->fde_count); | ||
311 | |||
312 | sort(header->table, | ||
313 | n, | ||
314 | sizeof(*header->table), | ||
315 | cmp_eh_frame_hdr_table_entries, | ||
316 | swap_eh_frame_hdr_table_entries); | ||
317 | |||
318 | table->hdrsz = hdrSize; | ||
319 | smp_wmb(); | ||
320 | table->header = (const void *)header; | ||
321 | } | ||
322 | |||
323 | static void *__init balloc(unsigned long sz) | ||
324 | { | ||
325 | return __alloc_bootmem_nopanic(sz, | ||
326 | sizeof(unsigned int), | ||
327 | __pa(MAX_DMA_ADDRESS)); | ||
328 | } | ||
329 | |||
330 | void __init unwind_setup(void) | ||
331 | { | ||
332 | setup_unwind_table(&root_table, balloc); | ||
173 | } | 333 | } |
174 | 334 | ||
175 | #ifdef CONFIG_MODULES | 335 | #ifdef CONFIG_MODULES |
@@ -193,7 +353,8 @@ void *unwind_add_table(struct module *module,
193 | init_unwind_table(table, module->name, | 353 | init_unwind_table(table, module->name, |
194 | module->module_core, module->core_size, | 354 | module->module_core, module->core_size, |
195 | module->module_init, module->init_size, | 355 | module->module_init, module->init_size, |
196 | table_start, table_size); | 356 | table_start, table_size, |
357 | NULL, 0); | ||
197 | 358 | ||
198 | if (last_table) | 359 | if (last_table) |
199 | last_table->link = table; | 360 | last_table->link = table; |
@@ -303,6 +464,26 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
303 | return value; | 464 | return value; |
304 | } | 465 | } |
305 | 466 | ||
467 | static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table) | ||
468 | { | ||
469 | const u32 *cie; | ||
470 | |||
471 | if (!*fde || (*fde & (sizeof(*fde) - 1))) | ||
472 | return &bad_cie; | ||
473 | if (!fde[1]) | ||
474 | return ¬_fde; /* this is a CIE */ | ||
475 | if ((fde[1] & (sizeof(*fde) - 1)) | ||
476 | || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address) | ||
477 | return NULL; /* this is not a valid FDE */ | ||
478 | cie = fde + 1 - fde[1] / sizeof(*fde); | ||
479 | if (*cie <= sizeof(*cie) + 4 | ||
480 | || *cie >= fde[1] - sizeof(*fde) | ||
481 | || (*cie & (sizeof(*cie) - 1)) | ||
482 | || cie[1]) | ||
483 | return NULL; /* this is not a (valid) CIE */ | ||
484 | return cie; | ||
485 | } | ||
486 | |||
306 | static unsigned long read_pointer(const u8 **pLoc, | 487 | static unsigned long read_pointer(const u8 **pLoc, |
307 | const void *end, | 488 | const void *end, |
308 | signed ptrType) | 489 | signed ptrType) |
@@ -610,49 +791,108 @@ int unwind(struct unwind_frame_info *frame)
610 | unsigned i; | 791 | unsigned i; |
611 | signed ptrType = -1; | 792 | signed ptrType = -1; |
612 | uleb128_t retAddrReg = 0; | 793 | uleb128_t retAddrReg = 0; |
613 | struct unwind_table *table; | 794 | const struct unwind_table *table; |
614 | struct unwind_state state; | 795 | struct unwind_state state; |
615 | 796 | ||
616 | if (UNW_PC(frame) == 0) | 797 | if (UNW_PC(frame) == 0) |
617 | return -EINVAL; | 798 | return -EINVAL; |
618 | if ((table = find_table(pc)) != NULL | 799 | if ((table = find_table(pc)) != NULL |
619 | && !(table->size & (sizeof(*fde) - 1))) { | 800 | && !(table->size & (sizeof(*fde) - 1))) { |
620 | unsigned long tableSize = table->size; | 801 | const u8 *hdr = table->header; |
621 | 802 | unsigned long tableSize; | |
622 | for (fde = table->address; | 803 | |
623 | tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; | 804 | smp_rmb(); |
624 | tableSize -= sizeof(*fde) + *fde, | 805 | if (hdr && hdr[0] == 1) { |
625 | fde += 1 + *fde / sizeof(*fde)) { | 806 | switch(hdr[3] & DW_EH_PE_FORM) { |
626 | if (!*fde || (*fde & (sizeof(*fde) - 1))) | 807 | case DW_EH_PE_native: tableSize = sizeof(unsigned long); break; |
627 | break; | 808 | case DW_EH_PE_data2: tableSize = 2; break; |
628 | if (!fde[1]) | 809 | case DW_EH_PE_data4: tableSize = 4; break; |
629 | continue; /* this is a CIE */ | 810 | case DW_EH_PE_data8: tableSize = 8; break; |
630 | if ((fde[1] & (sizeof(*fde) - 1)) | 811 | default: tableSize = 0; break; |
631 | || fde[1] > (unsigned long)(fde + 1) | ||
632 | - (unsigned long)table->address) | ||
633 | continue; /* this is not a valid FDE */ | ||
634 | cie = fde + 1 - fde[1] / sizeof(*fde); | ||
635 | if (*cie <= sizeof(*cie) + 4 | ||
636 | || *cie >= fde[1] - sizeof(*fde) | ||
637 | || (*cie & (sizeof(*cie) - 1)) | ||
638 | || cie[1] | ||
639 | || (ptrType = fde_pointer_type(cie)) < 0) { | ||
640 | cie = NULL; /* this is not a (valid) CIE */ | ||
641 | continue; | ||
642 | } | 812 | } |
813 | ptr = hdr + 4; | ||
814 | end = hdr + table->hdrsz; | ||
815 | if (tableSize | ||
816 | && read_pointer(&ptr, end, hdr[1]) | ||
817 | == (unsigned long)table->address | ||
818 | && (i = read_pointer(&ptr, end, hdr[2])) > 0 | ||
819 | && i == (end - ptr) / (2 * tableSize) | ||
820 | && !((end - ptr) % (2 * tableSize))) { | ||
821 | do { | ||
822 | const u8 *cur = ptr + (i / 2) * (2 * tableSize); | ||
823 | |||
824 | startLoc = read_pointer(&cur, | ||
825 | cur + tableSize, | ||
826 | hdr[3]); | ||
827 | if (pc < startLoc) | ||
828 | i /= 2; | ||
829 | else { | ||
830 | ptr = cur - tableSize; | ||
831 | i = (i + 1) / 2; | ||
832 | } | ||
833 | } while (startLoc && i > 1); | ||
834 | if (i == 1 | ||
835 | && (startLoc = read_pointer(&ptr, | ||
836 | ptr + tableSize, | ||
837 | hdr[3])) != 0 | ||
838 | && pc >= startLoc) | ||
839 | fde = (void *)read_pointer(&ptr, | ||
840 | ptr + tableSize, | ||
841 | hdr[3]); | ||
842 | } | ||
843 | } | ||
844 | |||
845 | if (fde != NULL) { | ||
846 | cie = cie_for_fde(fde, table); | ||
643 | ptr = (const u8 *)(fde + 2); | 847 | ptr = (const u8 *)(fde + 2); |
644 | startLoc = read_pointer(&ptr, | 848 | if(cie != NULL |
645 | (const u8 *)(fde + 1) + *fde, | 849 | && cie != &bad_cie |
646 | ptrType); | 850 | && cie != ¬_fde |
647 | endLoc = startLoc | 851 | && (ptrType = fde_pointer_type(cie)) >= 0 |
648 | + read_pointer(&ptr, | 852 | && read_pointer(&ptr, |
649 | (const u8 *)(fde + 1) + *fde, | 853 | (const u8 *)(fde + 1) + *fde, |
650 | ptrType & DW_EH_PE_indirect | 854 | ptrType) == startLoc) { |
651 | ? ptrType | 855 | if (!(ptrType & DW_EH_PE_indirect)) |
652 | : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); | 856 | ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; |
653 | if (pc >= startLoc && pc < endLoc) | 857 | endLoc = startLoc |
654 | break; | 858 | + read_pointer(&ptr, |
655 | cie = NULL; | 859 | (const u8 *)(fde + 1) + *fde, |
860 | ptrType); | ||
861 | if(pc >= endLoc) | ||
862 | fde = NULL; | ||
863 | } else | ||
864 | fde = NULL; | ||
865 | } | ||
866 | if (fde == NULL) { | ||
867 | for (fde = table->address, tableSize = table->size; | ||
868 | cie = NULL, tableSize > sizeof(*fde) | ||
869 | && tableSize - sizeof(*fde) >= *fde; | ||
870 | tableSize -= sizeof(*fde) + *fde, | ||
871 | fde += 1 + *fde / sizeof(*fde)) { | ||
872 | cie = cie_for_fde(fde, table); | ||
873 | if (cie == &bad_cie) { | ||
874 | cie = NULL; | ||
875 | break; | ||
876 | } | ||
877 | if (cie == NULL | ||
878 | || cie == ¬_fde | ||
879 | || (ptrType = fde_pointer_type(cie)) < 0) | ||
880 | continue; | ||
881 | ptr = (const u8 *)(fde + 2); | ||
882 | startLoc = read_pointer(&ptr, | ||
883 | (const u8 *)(fde + 1) + *fde, | ||
884 | ptrType); | ||
885 | if (!startLoc) | ||
886 | continue; | ||
887 | if (!(ptrType & DW_EH_PE_indirect)) | ||
888 | ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed; | ||
889 | endLoc = startLoc | ||
890 | + read_pointer(&ptr, | ||
891 | (const u8 *)(fde + 1) + *fde, | ||
892 | ptrType); | ||
893 | if (pc >= startLoc && pc < endLoc) | ||
894 | break; | ||
895 | } | ||
656 | } | 896 | } |
657 | } | 897 | } |
658 | if (cie != NULL) { | 898 | if (cie != NULL) { |
@@ -698,8 +938,11 @@ int unwind(struct unwind_frame_info *frame)
698 | else { | 938 | else { |
699 | retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end); | 939 | retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end); |
700 | /* skip augmentation */ | 940 | /* skip augmentation */ |
701 | if (((const char *)(cie + 2))[1] == 'z') | 941 | if (((const char *)(cie + 2))[1] == 'z') { |
702 | ptr += get_uleb128(&ptr, end); | 942 | uleb128_t augSize = get_uleb128(&ptr, end); |
943 | |||
944 | ptr += augSize; | ||
945 | } | ||
703 | if (ptr > end | 946 | if (ptr > end |
704 | || retAddrReg >= ARRAY_SIZE(reg_info) | 947 | || retAddrReg >= ARRAY_SIZE(reg_info) |
705 | || REG_INVALID(retAddrReg) | 948 | || REG_INVALID(retAddrReg) |
@@ -723,9 +966,7 @@ int unwind(struct unwind_frame_info *frame)
723 | if (cie == NULL || fde == NULL) { | 966 | if (cie == NULL || fde == NULL) { |
724 | #ifdef CONFIG_FRAME_POINTER | 967 | #ifdef CONFIG_FRAME_POINTER |
725 | unsigned long top, bottom; | 968 | unsigned long top, bottom; |
726 | #endif | ||
727 | 969 | ||
728 | #ifdef CONFIG_FRAME_POINTER | ||
729 | top = STACK_TOP(frame->task); | 970 | top = STACK_TOP(frame->task); |
730 | bottom = STACK_BOTTOM(frame->task); | 971 | bottom = STACK_BOTTOM(frame->task); |
731 | # if FRAME_RETADDR_OFFSET < 0 | 972 | # if FRAME_RETADDR_OFFSET < 0 |
diff --git a/kernel/user.c b/kernel/user.c
index 6408c0424291..220e586127a0 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -187,6 +187,17 @@ void switch_uid(struct user_struct *new_user)
187 | atomic_dec(&old_user->processes); | 187 | atomic_dec(&old_user->processes); |
188 | switch_uid_keyring(new_user); | 188 | switch_uid_keyring(new_user); |
189 | current->user = new_user; | 189 | current->user = new_user; |
190 | |||
191 | /* | ||
192 | * We need to synchronize with __sigqueue_alloc() | ||
193 | * doing a get_uid(p->user).. If that saw the old | ||
194 | * user value, we need to wait until it has exited | ||
195 | * its critical region before we can free the old | ||
196 | * structure. | ||
197 | */ | ||
198 | smp_mb(); | ||
199 | spin_unlock_wait(¤t->sighand->siglock); | ||
200 | |||
190 | free_uid(old_user); | 201 | free_uid(old_user); |
191 | suid_keys(current); | 202 | suid_keys(current); |
192 | } | 203 | } |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3df9bfc7ff78..17c2f03d2c27 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -99,7 +99,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
99 | * @wq: workqueue to use | 99 | * @wq: workqueue to use |
100 | * @work: work to queue | 100 | * @work: work to queue |
101 | * | 101 | * |
102 | * Returns non-zero if it was successfully added. | 102 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
103 | * | 103 | * |
104 | * We queue the work to the CPU it was submitted, but there is no | 104 | * We queue the work to the CPU it was submitted, but there is no |
105 | * guarantee that it will be processed by that CPU. | 105 | * guarantee that it will be processed by that CPU. |
@@ -138,7 +138,7 @@ static void delayed_work_timer_fn(unsigned long __data)
138 | * @work: work to queue | 138 | * @work: work to queue |
139 | * @delay: number of jiffies to wait before queueing | 139 | * @delay: number of jiffies to wait before queueing |
140 | * | 140 | * |
141 | * Returns non-zero if it was successfully added. | 141 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
142 | */ | 142 | */ |
143 | int fastcall queue_delayed_work(struct workqueue_struct *wq, | 143 | int fastcall queue_delayed_work(struct workqueue_struct *wq, |
144 | struct work_struct *work, unsigned long delay) | 144 | struct work_struct *work, unsigned long delay) |
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
169 | * @work: work to queue | 169 | * @work: work to queue |
170 | * @delay: number of jiffies to wait before queueing | 170 | * @delay: number of jiffies to wait before queueing |
171 | * | 171 | * |
172 | * Returns non-zero if it was successfully added. | 172 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
173 | */ | 173 | */ |
174 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 174 | int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
175 | struct work_struct *work, unsigned long delay) | 175 | struct work_struct *work, unsigned long delay) |