author     Andi Kleen <andi@basil.nowhere.org>  2006-11-21 04:22:09 -0500
committer  Andi Kleen <andi@basil.nowhere.org>  2006-11-21 04:22:09 -0500
commit     1b7f6a626f0ff511c3840678466cbfe1d62c0b29 (patch)
tree       415e8c838c0067bff384afb8a2c91e5f7c6d11d3 /kernel
parent     b3edc9cec07ade41aaf1804f7c9e876afa90c862 (diff)
parent     3f5a6ca31c334011fd929501a078424c0d3f71be (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c        35
-rw-r--r--  kernel/cpu.c           14
-rw-r--r--  kernel/delayacct.c     15
-rw-r--r--  kernel/exit.c           1
-rw-r--r--  kernel/fork.c           3
-rw-r--r--  kernel/futex.c          7
-rw-r--r--  kernel/irq/chip.c       2
-rw-r--r--  kernel/irq/manage.c     9
-rw-r--r--  kernel/irq/spurious.c   6
-rw-r--r--  kernel/lockdep.c        3
-rw-r--r--  kernel/module.c         4
-rw-r--r--  kernel/power/disk.c    37
-rw-r--r--  kernel/printk.c        21
-rw-r--r--  kernel/signal.c        15
-rw-r--r--  kernel/sys_ni.c         1
-rw-r--r--  kernel/sysctl.c        30
-rw-r--r--  kernel/taskstats.c     87
-rw-r--r--  kernel/time/ntp.c       2
-rw-r--r--  kernel/tsacct.c        17
-rw-r--r--  kernel/user.c          11
-rw-r--r--  kernel/workqueue.c      6
21 files changed, 230 insertions, 96 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 75573e5d27b0..6952dd057300 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -678,7 +678,7 @@ int get_compat_sigevent(struct sigevent *event,
 		? -EFAULT : 0;
 }
 
-long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 		       unsigned long bitmap_size)
 {
 	int i, j;
@@ -982,4 +982,37 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
 	}
 	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
 }
+
+asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
+			compat_ulong_t maxnode,
+			const compat_ulong_t __user *old_nodes,
+			const compat_ulong_t __user *new_nodes)
+{
+	unsigned long __user *old = NULL;
+	unsigned long __user *new = NULL;
+	nodemask_t tmp_mask;
+	unsigned long nr_bits;
+	unsigned long size;
+
+	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
+	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+	if (old_nodes) {
+		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
+			return -EFAULT;
+		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
+		if (new_nodes)
+			new = old + size / sizeof(unsigned long);
+		if (copy_to_user(old, nodes_addr(tmp_mask), size))
+			return -EFAULT;
+	}
+	if (new_nodes) {
+		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
+			return -EFAULT;
+		if (new == NULL)
+			new = compat_alloc_user_space(size);
+		if (copy_to_user(new, nodes_addr(tmp_mask), size))
+			return -EFAULT;
+	}
+	return sys_migrate_pages(pid, nr_bits + 1, old, new);
+}
 #endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 27dd3ee47099..272254f20d97 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -58,8 +58,8 @@ void unlock_cpu_hotplug(void)
 		recursive_depth--;
 		return;
 	}
-	mutex_unlock(&cpu_bitmask_lock);
 	recursive = NULL;
+	mutex_unlock(&cpu_bitmask_lock);
 }
 EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 
@@ -150,18 +150,18 @@ static int _cpu_down(unsigned int cpu)
 	p = __stop_machine_run(take_cpu_down, NULL, cpu);
 	mutex_unlock(&cpu_bitmask_lock);
 
-	if (IS_ERR(p)) {
+	if (IS_ERR(p) || cpu_online(cpu)) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
 				(void *)(long)cpu) == NOTIFY_BAD)
 			BUG();
 
-		err = PTR_ERR(p);
-		goto out_allowed;
-	}
-
-	if (cpu_online(cpu))
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
+			goto out_allowed;
+		}
 		goto out_thread;
+	}
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 36752f124c6a..66a0ea48751d 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -66,6 +66,7 @@ static void delayacct_end(struct timespec *start, struct timespec *end,
 {
 	struct timespec ts;
 	s64 ns;
+	unsigned long flags;
 
 	do_posix_clock_monotonic_gettime(end);
 	ts = timespec_sub(*end, *start);
@@ -73,10 +74,10 @@ static void delayacct_end(struct timespec *start, struct timespec *end,
 	if (ns < 0)
 		return;
 
-	spin_lock(&current->delays->lock);
+	spin_lock_irqsave(&current->delays->lock, flags);
 	*total += ns;
 	(*count)++;
-	spin_unlock(&current->delays->lock);
+	spin_unlock_irqrestore(&current->delays->lock, flags);
 }
 
 void __delayacct_blkio_start(void)
@@ -104,6 +105,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	s64 tmp;
 	struct timespec ts;
 	unsigned long t1,t2,t3;
+	unsigned long flags;
 
 	/* Though tsk->delays accessed later, early exit avoids
 	 * unnecessary returning of other data
@@ -136,14 +138,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 
 	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
 
-	spin_lock(&tsk->delays->lock);
+	spin_lock_irqsave(&tsk->delays->lock, flags);
 	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
 	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
 	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
 	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
 	d->blkio_count += tsk->delays->blkio_count;
 	d->swapin_count += tsk->delays->swapin_count;
-	spin_unlock(&tsk->delays->lock);
+	spin_unlock_irqrestore(&tsk->delays->lock, flags);
 
 done:
 	return 0;
@@ -152,11 +154,12 @@ done:
 __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
 {
 	__u64 ret;
+	unsigned long flags;
 
-	spin_lock(&tsk->delays->lock);
+	spin_lock_irqsave(&tsk->delays->lock, flags);
 	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
 				tsk->delays->swapin_delay);
-	spin_unlock(&tsk->delays->lock);
+	spin_unlock_irqrestore(&tsk->delays->lock, flags);
 	return ret;
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index f250a5e3e281..06de6c4e8ca3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -128,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
 		flush_sigqueue(&sig->shared_pending);
+		taskstats_tgid_free(sig);
 		__cleanup_signal(sig);
 	}
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 29ebb30850ed..3da978eec791 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -830,7 +830,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	if (clone_flags & CLONE_THREAD) {
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
-		taskstats_tgid_alloc(current->signal);
+		taskstats_tgid_alloc(current);
 		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -897,7 +897,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 void __cleanup_signal(struct signal_struct *sig)
 {
 	exit_thread_group_keys(sig);
-	taskstats_tgid_free(sig);
 	kmem_cache_free(signal_cachep, sig);
 }
 
diff --git a/kernel/futex.c b/kernel/futex.c
index b364e0026191..93ef30ba209f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1507,6 +1507,13 @@ static int futex_fd(u32 __user *uaddr, int signal)
 	struct futex_q *q;
 	struct file *filp;
 	int ret, err;
+	static unsigned long printk_interval;
+
+	if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
+		printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
+			"will be removed from the kernel in June 2007\n",
+			current->comm);
+	}
 
 	ret = -EINVAL;
 	if (!valid_signal(signal))
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 2d0dc3efe813..ebfd24a41858 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -233,6 +233,8 @@ void irq_chip_set_defaults(struct irq_chip *chip)
 		chip->shutdown = chip->disable;
 	if (!chip->name)
 		chip->name = chip->typename;
+	if (!chip->end)
+		chip->end = dummy_irq_chip.end;
 }
 
 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6879202afe9a..b385878c6e80 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -216,6 +216,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *old, **p;
+	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
 
@@ -255,8 +256,10 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		 * set the trigger type must match.
 		 */
 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
-		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK))
+		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+			old_name = old->name;
 			goto mismatch;
+		}
 
 #if defined(CONFIG_IRQ_PER_CPU)
 		/* All handlers must agree on per-cpuness */
@@ -322,11 +325,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	return 0;
 
 mismatch:
-	spin_unlock_irqrestore(&desc->lock, flags);
 	if (!(new->flags & IRQF_PROBE_SHARED)) {
 		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
+		if (old_name)
+			printk(KERN_ERR "current handler: %s\n", old_name);
 		dump_stack();
 	}
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return -EBUSY;
 }
 
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 543ea2e5ad93..9c7e2e4c1fe7 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -147,7 +147,11 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	if (unlikely(irqfixup)) {
 		/* Don't punish working computers */
 		if ((irqfixup == 2 && irq == 0) || action_ret == IRQ_NONE) {
-			int ok = misrouted_irq(irq);
+			int ok;
+
+			spin_unlock(&desc->lock);
+			ok = misrouted_irq(irq);
+			spin_lock(&desc->lock);
 			if (action_ret == IRQ_NONE)
 				desc->irqs_unhandled -= ok;
 		}
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b739be2a6dc9..c9fefdb1a7db 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1081,7 +1081,8 @@ static int static_obj(void *obj)
 	 */
 	for_each_possible_cpu(i) {
 		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end = (unsigned long) &__per_cpu_end + per_cpu_offset(i);
+		end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+					+ per_cpu_offset(i);
 
 		if ((addr >= start) && (addr < end))
 			return 1;
diff --git a/kernel/module.c b/kernel/module.c
index 67009bd56c52..f0166563c602 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1342,7 +1342,7 @@ static void set_license(struct module *mod, const char *license)
 
 	if (!license_is_gpl_compatible(license)) {
 		if (!(tainted & TAINT_PROPRIETARY_MODULE))
-			printk(KERN_WARNING "%s: module license '%s' taints"
+			printk(KERN_WARNING "%s: module license '%s' taints "
 				"kernel.\n", mod->name, license);
 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 	}
@@ -1718,7 +1718,7 @@ static struct module *load_module(void __user *umod,
 	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
 	if (strcmp(mod->name, "ndiswrapper") == 0)
-		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+		add_taint(TAINT_PROPRIETARY_MODULE);
 	if (strcmp(mod->name, "driverloader") == 0)
 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d3a158a60312..b1fb7866b0b3 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,7 +71,7 @@ static inline void platform_finish(void)
 
 static int prepare_processes(void)
 {
-	int error;
+	int error = 0;
 
 	pm_prepare_console();
 
@@ -84,6 +84,12 @@ static int prepare_processes(void)
 		goto thaw;
 	}
 
+	if (pm_disk_mode == PM_DISK_TESTPROC) {
+		printk("swsusp debug: Waiting for 5 seconds.\n");
+		mdelay(5000);
+		goto thaw;
+	}
+
 	/* Free memory before shutting down devices. */
 	if (!(error = swsusp_shrink_memory()))
 		return 0;
@@ -120,13 +126,21 @@ int pm_suspend_disk(void)
 	if (error)
 		return error;
 
+	if (pm_disk_mode == PM_DISK_TESTPROC)
+		goto Thaw;
+
 	suspend_console();
 	error = device_suspend(PMSG_FREEZE);
 	if (error) {
 		resume_console();
 		printk("Some devices failed to suspend\n");
-		unprepare_processes();
-		return error;
+		goto Thaw;
+	}
+
+	if (pm_disk_mode == PM_DISK_TEST) {
+		printk("swsusp debug: Waiting for 5 seconds.\n");
+		mdelay(5000);
+		goto Done;
 	}
 
 	pr_debug("PM: snapshotting memory.\n");
@@ -143,16 +157,17 @@ int pm_suspend_disk(void)
 			power_down(pm_disk_mode);
 		else {
 			swsusp_free();
-			unprepare_processes();
-			return error;
+			goto Thaw;
 		}
-	} else
+	} else {
 		pr_debug("PM: Image restored successfully.\n");
+	}
 
 	swsusp_free();
 Done:
 	device_resume();
 	resume_console();
+Thaw:
 	unprepare_processes();
 	return error;
 }
@@ -249,6 +264,8 @@ static const char * const pm_disk_modes[] = {
 	[PM_DISK_PLATFORM] = "platform",
 	[PM_DISK_SHUTDOWN] = "shutdown",
 	[PM_DISK_REBOOT] = "reboot",
+	[PM_DISK_TEST] = "test",
+	[PM_DISK_TESTPROC] = "testproc",
 };
 
 /**
@@ -303,17 +320,19 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n)
 		}
 	}
 	if (mode) {
-		if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT)
+		if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT ||
+		     mode == PM_DISK_TEST || mode == PM_DISK_TESTPROC) {
 			pm_disk_mode = mode;
-		else {
+		} else {
 			if (pm_ops && pm_ops->enter &&
 			    (mode == pm_ops->pm_disk_mode))
 				pm_disk_mode = mode;
 			else
 				error = -EINVAL;
 		}
-	} else
+	} else {
 		error = -EINVAL;
+	}
 
 	pr_debug("PM: suspend-to-disk mode set to '%s'\n",
 		pm_disk_modes[mode]);
diff --git a/kernel/printk.c b/kernel/printk.c
index f7d427ef5038..66426552fbfe 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
+#include <linux/jiffies.h>
 
 #include <asm/uaccess.h>
 
@@ -1101,3 +1102,23 @@ int printk_ratelimit(void)
 				printk_ratelimit_burst);
 }
 EXPORT_SYMBOL(printk_ratelimit);
+
+/**
+ * printk_timed_ratelimit - caller-controlled printk ratelimiting
+ * @caller_jiffies: pointer to caller's state
+ * @interval_msecs: minimum interval between prints
+ *
+ * printk_timed_ratelimit() returns true if more than @interval_msecs
+ * milliseconds have elapsed since the last time printk_timed_ratelimit()
+ * returned true.
+ */
+bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+			unsigned int interval_msecs)
+{
+	if (*caller_jiffies == 0 || time_after(jiffies, *caller_jiffies)) {
+		*caller_jiffies = jiffies + msecs_to_jiffies(interval_msecs);
+		return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(printk_timed_ratelimit);
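Illustrative note (not part of the commit): a caller keeps its ratelimit state in its own static unsigned long and hands it to the new helper, as the kernel/futex.c hunk earlier in this diff does. A minimal sketch with a made-up caller name:

	/* sketch only: my_poll_handler() is hypothetical, not from this commit */
	static void my_poll_handler(void)
	{
		static unsigned long warn_jiffies;	/* per-callsite state, initially 0 */

		/* emit the warning at most once every 10 seconds */
		if (printk_timed_ratelimit(&warn_jiffies, 10 * 1000))
			printk(KERN_WARNING "my_poll_handler: device not responding\n");
	}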
diff --git a/kernel/signal.c b/kernel/signal.c
index 7ed8d5304bec..df18c167a2a7 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -267,18 +267,25 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
 					 int override_rlimit)
 {
 	struct sigqueue *q = NULL;
+	struct user_struct *user;
 
-	atomic_inc(&t->user->sigpending);
+	/*
+	 * In order to avoid problems with "switch_user()", we want to make
+	 * sure that the compiler doesn't re-load "t->user"
+	 */
+	user = t->user;
+	barrier();
+	atomic_inc(&user->sigpending);
 	if (override_rlimit ||
-	    atomic_read(&t->user->sigpending) <=
+	    atomic_read(&user->sigpending) <=
 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, flags);
 	if (unlikely(q == NULL)) {
-		atomic_dec(&t->user->sigpending);
+		atomic_dec(&user->sigpending);
 	} else {
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
-		q->user = get_uid(t->user);
+		q->user = get_uid(user);
 	}
 	return(q);
 }
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 0e53314b14de..d7306d0f3dfc 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -135,6 +135,7 @@ cond_syscall(sys_madvise);
 cond_syscall(sys_mremap);
 cond_syscall(sys_remap_file_pages);
 cond_syscall(compat_sys_move_pages);
+cond_syscall(compat_sys_migrate_pages);
 
 /* block-layer dependent */
 cond_syscall(sys_bdflush);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8bff2c18fb5a..09e569f4792b 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1315,7 +1315,9 @@ repeat:
 		return -ENOTDIR;
 	if (get_user(n, name))
 		return -EFAULT;
-	for ( ; table->ctl_name; table++) {
+	for ( ; table->ctl_name || table->procname; table++) {
+		if (!table->ctl_name)
+			continue;
 		if (n == table->ctl_name || table->ctl_name == CTL_ANY) {
 			int error;
 			if (table->child) {
@@ -1532,7 +1534,7 @@ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root,
 	int len;
 	mode_t mode;
 
-	for (; table->ctl_name; table++) {
+	for (; table->ctl_name || table->procname; table++) {
 		/* Can't do anything without a proc name. */
 		if (!table->procname)
 			continue;
@@ -1579,7 +1581,7 @@
 static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root)
 {
 	struct proc_dir_entry *de;
-	for (; table->ctl_name; table++) {
+	for (; table->ctl_name || table->procname; table++) {
 		if (!(de = table->de))
 			continue;
 		if (de->mode & S_IFDIR) {
@@ -2680,13 +2682,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
 asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
 {
 	static int msg_count;
+	struct __sysctl_args tmp;
+	int name[CTL_MAXNAME];
+	int i;
+
+	/* Read in the sysctl name for better debug message logging */
+	if (copy_from_user(&tmp, args, sizeof(tmp)))
+		return -EFAULT;
+	if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME)
+		return -ENOTDIR;
+	for (i = 0; i < tmp.nlen; i++)
+		if (get_user(name[i], tmp.name + i))
+			return -EFAULT;
+
+	/* Ignore accesses to kernel.version */
+	if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
+		goto out;
 
 	if (msg_count < 5) {
 		msg_count++;
 		printk(KERN_INFO
 			"warning: process `%s' used the removed sysctl "
-			"system call\n", current->comm);
+			"system call with ", current->comm);
+		for (i = 0; i < tmp.nlen; i++)
+			printk("%d.", name[i]);
+		printk("\n");
 	}
+out:
 	return -ENOSYS;
 }
 
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 5d6a8c54ee85..f45c5e70773c 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -77,7 +77,8 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
 	/*
 	 * If new attributes are added, please revisit this allocation
 	 */
-	skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL);
+	size = nlmsg_total_size(genlmsg_total_size(size));
+	skb = nlmsg_new(size, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
 
@@ -174,21 +175,19 @@ static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
 	up_write(&listeners->sem);
 }
 
-static int fill_pid(pid_t pid, struct task_struct *pidtsk,
+static int fill_pid(pid_t pid, struct task_struct *tsk,
 		struct taskstats *stats)
 {
 	int rc = 0;
-	struct task_struct *tsk = pidtsk;
 
-	if (!pidtsk) {
-		read_lock(&tasklist_lock);
+	if (!tsk) {
+		rcu_read_lock();
 		tsk = find_task_by_pid(pid);
-		if (!tsk) {
-			read_unlock(&tasklist_lock);
+		if (tsk)
+			get_task_struct(tsk);
+		rcu_read_unlock();
+		if (!tsk)
 			return -ESRCH;
-		}
-		get_task_struct(tsk);
-		read_unlock(&tasklist_lock);
 	} else
 		get_task_struct(tsk);
 
@@ -214,39 +213,30 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk,
 
 }
 
-static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
+static int fill_tgid(pid_t tgid, struct task_struct *first,
 		struct taskstats *stats)
 {
-	struct task_struct *tsk, *first;
+	struct task_struct *tsk;
 	unsigned long flags;
+	int rc = -ESRCH;
 
 	/*
 	 * Add additional stats from live tasks except zombie thread group
 	 * leaders who are already counted with the dead tasks
 	 */
-	first = tgidtsk;
-	if (!first) {
-		read_lock(&tasklist_lock);
+	rcu_read_lock();
+	if (!first)
 		first = find_task_by_pid(tgid);
-		if (!first) {
-			read_unlock(&tasklist_lock);
-			return -ESRCH;
-		}
-		get_task_struct(first);
-		read_unlock(&tasklist_lock);
-	} else
-		get_task_struct(first);
 
-	/* Start with stats from dead tasks */
-	spin_lock_irqsave(&first->signal->stats_lock, flags);
+	if (!first || !lock_task_sighand(first, &flags))
+		goto out;
+
 	if (first->signal->stats)
 		memcpy(stats, first->signal->stats, sizeof(*stats));
-	spin_unlock_irqrestore(&first->signal->stats_lock, flags);
 
 	tsk = first;
-	read_lock(&tasklist_lock);
 	do {
-		if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
+		if (tsk->exit_state)
 			continue;
 		/*
 		 * Accounting subsystem can call its functions here to
@@ -257,15 +247,18 @@ static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
 		delayacct_add_tsk(stats, tsk);
 
 	} while_each_thread(first, tsk);
-	read_unlock(&tasklist_lock);
-	stats->version = TASKSTATS_VERSION;
 
+	unlock_task_sighand(first, &flags);
+	rc = 0;
+out:
+	rcu_read_unlock();
+
+	stats->version = TASKSTATS_VERSION;
 	/*
 	 * Accounting subsytems can also add calls here to modify
 	 * fields of taskstats.
 	 */
-
-	return 0;
+	return rc;
 }
 
 
@@ -273,7 +266,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
+	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 	if (!tsk->signal->stats)
 		goto ret;
 
@@ -285,7 +278,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
 	 */
 	delayacct_add_tsk(tsk->signal->stats, tsk);
 ret:
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
+	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 	return;
 }
 
@@ -419,7 +412,7 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 	return send_reply(rep_skb, info->snd_pid);
 
 nla_put_failure:
-	return genlmsg_cancel(rep_skb, reply);
+	rc = genlmsg_cancel(rep_skb, reply);
 err:
 	nlmsg_free(rep_skb);
 	return rc;
@@ -461,24 +454,26 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
 	size_t size;
 	int is_thread_group;
 	struct nlattr *na;
-	unsigned long flags;
 
-	if (!family_registered || !tidstats)
+	if (!family_registered)
 		return;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
-	is_thread_group = tsk->signal->stats ? 1 : 0;
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
-
-	rc = 0;
 	/*
 	 * Size includes space for nested attributes
 	 */
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-	if (is_thread_group)
-		size = 2 * size;	/* PID + STATS + TGID + STATS */
+	is_thread_group = (tsk->signal->stats != NULL);
+	if (is_thread_group) {
+		/* PID + STATS + TGID + STATS */
+		size = 2 * size;
+		/* fill the tsk->signal->stats structure */
+		fill_tgid_exit(tsk);
+	}
+
+	if (!tidstats)
+		return;
 
 	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
 	if (rc < 0)
@@ -498,11 +493,8 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
 		goto send;
 
 	/*
-	 * tsk has/had a thread group so fill the tsk->signal->stats structure
 	 * Doesn't matter if tsk is the leader or the last group member leaving
 	 */
-
-	fill_tgid_exit(tsk);
 	if (!group_dead)
 		goto send;
 
@@ -519,7 +511,6 @@ send:
 
 nla_put_failure:
 	genlmsg_cancel(rep_skb, reply);
-	goto ret;
err_skb:
 	nlmsg_free(rep_skb);
ret:
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 47195fa0ec4f..3afeaa3a73f9 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -161,9 +161,9 @@ void second_overflow(void)
 			time_adjust += MAX_TICKADJ;
 			tick_length -= MAX_TICKADJ_SCALED;
 		} else {
-			time_adjust = 0;
 			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
 					HZ) << TICK_LENGTH_SHIFT;
+			time_adjust = 0;
 		}
 	}
 }
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index db443221ba5b..96f77013d3f0 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -36,7 +36,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 	/* calculate task elapsed time in timespec */
 	do_posix_clock_monotonic_gettime(&uptime);
-	ts = timespec_sub(uptime, current->group_leader->start_time);
+	ts = timespec_sub(uptime, tsk->start_time);
 	/* rebase elapsed time to usec */
 	ac_etime = timespec_to_ns(&ts);
 	do_div(ac_etime, NSEC_PER_USEC);
@@ -58,7 +58,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 	stats->ac_uid = tsk->uid;
 	stats->ac_gid = tsk->gid;
 	stats->ac_pid = tsk->pid;
-	stats->ac_ppid = (tsk->parent) ? tsk->parent->pid : 0;
+	rcu_read_lock();
+	stats->ac_ppid = pid_alive(tsk) ?
+				rcu_dereference(tsk->real_parent)->tgid : 0;
+	rcu_read_unlock();
 	stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
 	stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
 	stats->ac_minflt = tsk->min_flt;
@@ -77,13 +80,17 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
  */
 void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 {
+	struct mm_struct *mm;
+
 	/* convert pages-jiffies to Mbyte-usec */
 	stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB;
 	stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB;
-	if (p->mm) {
+	mm = get_task_mm(p);
+	if (mm) {
 		/* adjust to KB unit */
-		stats->hiwater_rss = p->mm->hiwater_rss * PAGE_SIZE / KB;
-		stats->hiwater_vm = p->mm->hiwater_vm * PAGE_SIZE / KB;
+		stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB;
+		stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
+		mmput(mm);
 	}
 	stats->read_char = p->rchar;
 	stats->write_char = p->wchar;
diff --git a/kernel/user.c b/kernel/user.c
index 6408c0424291..220e586127a0 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -187,6 +187,17 @@ void switch_uid(struct user_struct *new_user)
 	atomic_dec(&old_user->processes);
 	switch_uid_keyring(new_user);
 	current->user = new_user;
+
+	/*
+	 * We need to synchronize with __sigqueue_alloc()
+	 * doing a get_uid(p->user).. If that saw the old
+	 * user value, we need to wait until it has exited
+	 * its critical region before we can free the old
+	 * structure.
+	 */
+	smp_mb();
+	spin_unlock_wait(&current->sighand->siglock);
+
 	free_uid(old_user);
 	suid_keys(current);
 }
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3df9bfc7ff78..17c2f03d2c27 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -99,7 +99,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 * @wq: workqueue to use
 * @work: work to queue
 *
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU it was submitted, but there is no
 * guarantee that it will be processed by that CPU.
@@ -138,7 +138,7 @@ static void delayed_work_timer_fn(unsigned long __data)
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int fastcall queue_delayed_work(struct workqueue_struct *wq,
			struct work_struct *work, unsigned long delay)
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
 * @work: work to queue
 * @delay: number of jiffies to wait before queueing
 *
- * Returns non-zero if it was successfully added.
+ * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
		struct work_struct *work, unsigned long delay)
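
Illustrative note (not part of the commit): the reworded kerneldoc above spells out the existing return convention: 0 means the work item was already pending, non-zero means it was newly queued. A minimal sketch with hypothetical names (my_wq and my_work assumed to be created and initialized elsewhere):

	/* sketch only: my_wq and my_work are hypothetical and set up elsewhere */
	static struct workqueue_struct *my_wq;
	static struct work_struct my_work;

	static void kick_rescan(void)
	{
		/* 0: my_work was already on the queue; non-zero: newly queued */
		if (!queue_delayed_work(my_wq, &my_work, msecs_to_jiffies(100)))
			pr_debug("rescan already pending\n");
	}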