Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c             35
-rw-r--r--  kernel/cpu.c                38
-rw-r--r--  kernel/delayacct.c          15
-rw-r--r--  kernel/exit.c                1
-rw-r--r--  kernel/fork.c               12
-rw-r--r--  kernel/futex.c               7
-rw-r--r--  kernel/irq/chip.c           35
-rw-r--r--  kernel/irq/handle.c          4
-rw-r--r--  kernel/irq/manage.c          9
-rw-r--r--  kernel/kmod.c                8
-rw-r--r--  kernel/lockdep.c            21
-rw-r--r--  kernel/module.c             35
-rw-r--r--  kernel/mutex-debug.c         2
-rw-r--r--  kernel/nsproxy.c             6
-rw-r--r--  kernel/posix-cpu-timers.c   27
-rw-r--r--  kernel/power/disk.c         37
-rw-r--r--  kernel/power/swap.c          3
-rw-r--r--  kernel/printk.c             21
-rw-r--r--  kernel/sched.c              18
-rw-r--r--  kernel/signal.c             15
-rw-r--r--  kernel/spinlock.c           21
-rw-r--r--  kernel/sys_ni.c              2
-rw-r--r--  kernel/sysctl.c             34
-rw-r--r--  kernel/taskstats.c          94
-rw-r--r--  kernel/time/jiffies.c        2
-rw-r--r--  kernel/time/ntp.c            2
-rw-r--r--  kernel/tsacct.c             17
-rw-r--r--  kernel/unwind.c            327
-rw-r--r--  kernel/user.c               11
-rw-r--r--  kernel/workqueue.c           6
30 files changed, 642 insertions(+), 223 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 75573e5d27..6952dd0573 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -678,7 +678,7 @@ int get_compat_sigevent(struct sigevent *event,
 		? -EFAULT : 0;
 }
 
-long compat_get_bitmap(unsigned long *mask, compat_ulong_t __user *umask,
+long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
 		       unsigned long bitmap_size)
 {
 	int i, j;
@@ -982,4 +982,37 @@ asmlinkage long compat_sys_move_pages(pid_t pid, unsigned long nr_pages,
 	}
 	return sys_move_pages(pid, nr_pages, pages, nodes, status, flags);
 }
+
+asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
+			compat_ulong_t maxnode,
+			const compat_ulong_t __user *old_nodes,
+			const compat_ulong_t __user *new_nodes)
+{
+	unsigned long __user *old = NULL;
+	unsigned long __user *new = NULL;
+	nodemask_t tmp_mask;
+	unsigned long nr_bits;
+	unsigned long size;
+
+	nr_bits = min_t(unsigned long, maxnode - 1, MAX_NUMNODES);
+	size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
+	if (old_nodes) {
+		if (compat_get_bitmap(nodes_addr(tmp_mask), old_nodes, nr_bits))
+			return -EFAULT;
+		old = compat_alloc_user_space(new_nodes ? size * 2 : size);
+		if (new_nodes)
+			new = old + size / sizeof(unsigned long);
+		if (copy_to_user(old, nodes_addr(tmp_mask), size))
+			return -EFAULT;
+	}
+	if (new_nodes) {
+		if (compat_get_bitmap(nodes_addr(tmp_mask), new_nodes, nr_bits))
+			return -EFAULT;
+		if (new == NULL)
+			new = compat_alloc_user_space(size);
+		if (copy_to_user(new, nodes_addr(tmp_mask), size))
+			return -EFAULT;
+	}
+	return sys_migrate_pages(pid, nr_bits + 1, old, new);
+}
 #endif
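
The new compat_sys_migrate_pages() above leans on compat_get_bitmap() to repack a node mask that a compat (32-bit) task supplies as 32-bit words into the kernel's native unsigned long words. A rough stand-alone illustration of that repacking on a 64-bit little-endian machine, for reference only (repack() and user_mask are hypothetical names, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	/* Pack 32-bit mask words into 64-bit words, low word first,
	 * mirroring what compat_get_bitmap() does for a 64-bit kernel. */
	static void repack(uint64_t *dst, const uint32_t *src, int nr_u32)
	{
		int i;

		for (i = 0; i < nr_u32; i += 2) {
			uint64_t lo = src[i];
			uint64_t hi = (i + 1 < nr_u32) ? src[i + 1] : 0;

			dst[i / 2] = lo | (hi << 32);
		}
	}

	int main(void)
	{
		uint32_t user_mask[2] = { 0x0000000f, 0x00000001 }; /* nodes 0-3 and 32 */
		uint64_t kernel_mask[1];

		repack(kernel_mask, user_mask, 2);
		printf("%#llx\n", (unsigned long long)kernel_mask[0]); /* 0x10000000f */
		return 0;
	}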
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 32c9662846..272254f20d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -19,7 +19,7 @@
 static DEFINE_MUTEX(cpu_add_remove_lock);
 static DEFINE_MUTEX(cpu_bitmask_lock);
 
-static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
  * Should always be manipulated under cpu_add_remove_lock
@@ -58,8 +58,8 @@ void unlock_cpu_hotplug(void)
 		recursive_depth--;
 		return;
 	}
-	mutex_unlock(&cpu_bitmask_lock);
 	recursive = NULL;
+	mutex_unlock(&cpu_bitmask_lock);
 }
 EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 
@@ -68,7 +68,11 @@ EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 /* Need to know about CPUs going up/down? */
 int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 {
-	return blocking_notifier_chain_register(&cpu_chain, nb);
+	int ret;
+	mutex_lock(&cpu_add_remove_lock);
+	ret = raw_notifier_chain_register(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
+	return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -77,7 +81,9 @@ EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	blocking_notifier_chain_unregister(&cpu_chain, nb);
+	mutex_lock(&cpu_add_remove_lock);
+	raw_notifier_chain_unregister(&cpu_chain, nb);
+	mutex_unlock(&cpu_add_remove_lock);
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
@@ -126,7 +132,7 @@ static int _cpu_down(unsigned int cpu)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
-	err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
+	err = raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
 		printk("%s: attempt to take down CPU %u failed\n",
@@ -144,18 +150,18 @@ static int _cpu_down(unsigned int cpu)
 	p = __stop_machine_run(take_cpu_down, NULL, cpu);
 	mutex_unlock(&cpu_bitmask_lock);
 
-	if (IS_ERR(p)) {
+	if (IS_ERR(p) || cpu_online(cpu)) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
-		if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
+		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
 				(void *)(long)cpu) == NOTIFY_BAD)
 			BUG();
 
-		err = PTR_ERR(p);
-		goto out_allowed;
-	}
-
-	if (cpu_online(cpu))
+		if (IS_ERR(p)) {
+			err = PTR_ERR(p);
+			goto out_allowed;
+		}
 		goto out_thread;
+	}
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
@@ -169,7 +175,7 @@ static int _cpu_down(unsigned int cpu)
 	put_cpu();
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
-	if (blocking_notifier_call_chain(&cpu_chain, CPU_DEAD,
+	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD,
 			(void *)(long)cpu) == NOTIFY_BAD)
 		BUG();
 
@@ -206,7 +212,7 @@ static int __devinit _cpu_up(unsigned int cpu)
 	if (cpu_online(cpu) || !cpu_present(cpu))
 		return -EINVAL;
 
-	ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
+	ret = raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
@@ -223,11 +229,11 @@ static int __devinit _cpu_up(unsigned int cpu)
 	BUG_ON(!cpu_online(cpu));
 
 	/* Now call notifier in preparation. */
-	blocking_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
+	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);
 
 out_notify:
 	if (ret != 0)
-		blocking_notifier_call_chain(&cpu_chain,
+		raw_notifier_call_chain(&cpu_chain,
 				CPU_UP_CANCELED, hcpu);
 
 	return ret;
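
For context, the register_cpu_notifier() interface whose locking changes above is used roughly like this; a hedged sketch matching the notifier API of this kernel generation (my_cpu_callback and my_cpu_notifier are hypothetical names):

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/notifier.h>

	/* Invoked for CPU_UP_PREPARE/CPU_ONLINE/CPU_DEAD/... events; with
	 * this patch the chain now runs under cpu_add_remove_lock. */
	static int my_cpu_callback(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_ONLINE:
			/* set up per-cpu state for 'cpu' */
			break;
		case CPU_DEAD:
			/* tear down per-cpu state for 'cpu' */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_cpu_notifier = {
		.notifier_call = my_cpu_callback,
	};

	static int __init my_init(void)
	{
		register_cpu_notifier(&my_cpu_notifier);
		return 0;
	}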
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 36752f124c..66a0ea4875 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -66,6 +66,7 @@ static void delayacct_end(struct timespec *start, struct timespec *end,
 {
 	struct timespec ts;
 	s64 ns;
+	unsigned long flags;
 
 	do_posix_clock_monotonic_gettime(end);
 	ts = timespec_sub(*end, *start);
@@ -73,10 +74,10 @@ static void delayacct_end(struct timespec *start, struct timespec *end,
 	if (ns < 0)
 		return;
 
-	spin_lock(&current->delays->lock);
+	spin_lock_irqsave(&current->delays->lock, flags);
 	*total += ns;
 	(*count)++;
-	spin_unlock(&current->delays->lock);
+	spin_unlock_irqrestore(&current->delays->lock, flags);
 }
 
 void __delayacct_blkio_start(void)
@@ -104,6 +105,7 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	s64 tmp;
 	struct timespec ts;
 	unsigned long t1,t2,t3;
+	unsigned long flags;
 
 	/* Though tsk->delays accessed later, early exit avoids
 	 * unnecessary returning of other data
@@ -136,14 +138,14 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 
 	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
 
-	spin_lock(&tsk->delays->lock);
+	spin_lock_irqsave(&tsk->delays->lock, flags);
 	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
 	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
 	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
 	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
 	d->blkio_count += tsk->delays->blkio_count;
 	d->swapin_count += tsk->delays->swapin_count;
-	spin_unlock(&tsk->delays->lock);
+	spin_unlock_irqrestore(&tsk->delays->lock, flags);
 
 done:
 	return 0;
@@ -152,11 +154,12 @@ done:
 __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
 {
 	__u64 ret;
+	unsigned long flags;
 
-	spin_lock(&tsk->delays->lock);
+	spin_lock_irqsave(&tsk->delays->lock, flags);
 	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
 				tsk->delays->swapin_delay);
-	spin_unlock(&tsk->delays->lock);
+	spin_unlock_irqrestore(&tsk->delays->lock, flags);
 	return ret;
 }
 
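
The spin_lock() to spin_lock_irqsave() conversions above follow the standard pattern for a lock that must be safe to take regardless of the caller's interrupt state. A generic sketch of the pattern, for illustration only (my_lock, my_counter and my_account are hypothetical):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);
	static unsigned long my_counter;

	/* Safe from any context: interrupts are disabled while the lock
	 * is held, and the previous interrupt state is restored after. */
	static void my_account(unsigned long delta)
	{
		unsigned long flags;

		spin_lock_irqsave(&my_lock, flags);
		my_counter += delta;
		spin_unlock_irqrestore(&my_lock, flags);
	}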
diff --git a/kernel/exit.c b/kernel/exit.c
index f250a5e3e2..06de6c4e8c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -128,6 +128,7 @@ static void __exit_signal(struct task_struct *tsk)
 	flush_sigqueue(&tsk->pending);
 	if (sig) {
 		flush_sigqueue(&sig->shared_pending);
+		taskstats_tgid_free(sig);
 		__cleanup_signal(sig);
 	}
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 7dc6140baa..8cdd3e72ba 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -830,7 +830,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	if (clone_flags & CLONE_THREAD) {
 		atomic_inc(&current->signal->count);
 		atomic_inc(&current->signal->live);
-		taskstats_tgid_alloc(current->signal);
+		taskstats_tgid_alloc(current);
 		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
@@ -897,7 +897,6 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 void __cleanup_signal(struct signal_struct *sig)
 {
 	exit_thread_group_keys(sig);
-	taskstats_tgid_free(sig);
 	kmem_cache_free(signal_cachep, sig);
 }
 
@@ -984,6 +983,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	rt_mutex_init_task(p);
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
@@ -1088,8 +1089,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->lockdep_recursion = 0;
 #endif
 
-	rt_mutex_init_task(p);
-
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
@@ -1316,9 +1315,8 @@ struct task_struct * __devinit fork_idle(int cpu)
 	struct pt_regs regs;
 
 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, NULL, 0);
-	if (!task)
-		return ERR_PTR(-ENOMEM);
-	init_idle(task, cpu);
+	if (!IS_ERR(task))
+		init_idle(task, cpu);
 
 	return task;
 }
diff --git a/kernel/futex.c b/kernel/futex.c
index b364e00261..93ef30ba20 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1507,6 +1507,13 @@ static int futex_fd(u32 __user *uaddr, int signal)
 	struct futex_q *q;
 	struct file *filp;
 	int ret, err;
+	static unsigned long printk_interval;
+
+	if (printk_timed_ratelimit(&printk_interval, 60 * 60 * 1000)) {
+		printk(KERN_WARNING "Process `%s' used FUTEX_FD, which "
+		       "will be removed from the kernel in June 2007\n",
+		       current->comm);
+	}
 
 	ret = -EINVAL;
 	if (!valid_signal(signal))
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 11c99697ac..ebfd24a418 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -233,6 +233,8 @@ void irq_chip_set_defaults(struct irq_chip *chip)
 		chip->shutdown = chip->disable;
 	if (!chip->name)
 		chip->name = chip->typename;
+	if (!chip->end)
+		chip->end = dummy_irq_chip.end;
 }
 
 static inline void mask_ack_irq(struct irq_desc *desc, int irq)
@@ -499,7 +501,8 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 #endif /* CONFIG_SMP */
 
 void
-__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained)
+__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
+		  const char *name)
 {
 	struct irq_desc *desc;
 	unsigned long flags;
@@ -540,6 +543,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained)
 		desc->depth = 1;
 	}
 	desc->handle_irq = handle;
+	desc->name = name;
 
 	if (handle != handle_bad_irq && is_chained) {
 		desc->status &= ~IRQ_DISABLED;
@@ -555,30 +559,13 @@ set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
 			 irq_flow_handler_t handle)
 {
 	set_irq_chip(irq, chip);
-	__set_irq_handler(irq, handle, 0);
+	__set_irq_handler(irq, handle, 0, NULL);
 }
 
-/*
- * Get a descriptive string for the highlevel handler, for
- * /proc/interrupts output:
- */
-const char *
-handle_irq_name(irq_flow_handler_t handle)
+void
+set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
+			      irq_flow_handler_t handle, const char *name)
 {
-	if (handle == handle_level_irq)
-		return "level ";
-	if (handle == handle_fasteoi_irq)
-		return "fasteoi";
-	if (handle == handle_edge_irq)
-		return "edge  ";
-	if (handle == handle_simple_irq)
-		return "simple ";
-#ifdef CONFIG_SMP
-	if (handle == handle_percpu_irq)
-		return "percpu ";
-#endif
-	if (handle == handle_bad_irq)
-		return "bad ";
-
-	return NULL;
+	set_irq_chip(irq, chip);
+	__set_irq_handler(irq, handle, 0, name);
 }
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 42aa6f1a3f..a681912bc8 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -231,10 +231,10 @@ fastcall unsigned int __do_IRQ(unsigned int irq)
 		spin_unlock(&desc->lock);
 
 		action_ret = handle_IRQ_event(irq, action);
-
-		spin_lock(&desc->lock);
 		if (!noirqdebug)
 			note_interrupt(irq, desc, action_ret);
+
+		spin_lock(&desc->lock);
 		if (likely(!(desc->status & IRQ_PENDING)))
 			break;
 		desc->status &= ~IRQ_PENDING;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6879202afe..b385878c6e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -216,6 +216,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 {
 	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *old, **p;
+	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
 
@@ -255,8 +256,10 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		 * set the trigger type must match.
 		 */
 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
-		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK))
+		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+			old_name = old->name;
 			goto mismatch;
+		}
 
 #if defined(CONFIG_IRQ_PER_CPU)
 		/* All handlers must agree on per-cpuness */
@@ -322,11 +325,13 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	return 0;
 
 mismatch:
-	spin_unlock_irqrestore(&desc->lock, flags);
 	if (!(new->flags & IRQF_PROBE_SHARED)) {
 		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
+		if (old_name)
+			printk(KERN_ERR "current handler: %s\n", old_name);
 		dump_stack();
 	}
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return -EBUSY;
 }
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index bb4e29d924..2b76dee284 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -307,14 +307,14 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 		return 0;
 
 	f = create_write_pipe();
-	if (!f)
-		return -ENOMEM;
+	if (IS_ERR(f))
+		return PTR_ERR(f);
 	*filp = f;
 
 	f = create_read_pipe(f);
-	if (!f) {
+	if (IS_ERR(f)) {
 		free_write_pipe(*filp);
-		return -ENOMEM;
+		return PTR_ERR(f);
 	}
 	sub_info.stdin = f;
 
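
The fix above switches the pipe helpers to the kernel's ERR_PTR convention: a failing function encodes the error code in the returned pointer instead of returning NULL, so the caller can propagate the real errno. A minimal sketch of the round trip (struct foo, my_alloc and my_user are hypothetical):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int x; };	/* hypothetical object */

	static struct foo *my_alloc(void)
	{
		struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

		if (!p)
			return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
		return p;
	}

	static int my_user(void)
	{
		struct foo *p = my_alloc();

		if (IS_ERR(p))
			return PTR_ERR(p);	/* recover the errno, e.g. -ENOMEM */
		/* ... use p ... */
		kfree(p);
		return 0;
	}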
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 805a322a56..c9fefdb1a7 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -575,6 +575,8 @@ static noinline int print_circular_bug_tail(void)
 	return 0;
 }
 
+#define RECURSION_LIMIT 40
+
 static int noinline print_infinite_recursion_bug(void)
 {
 	__raw_spin_unlock(&hash_lock);
@@ -595,7 +597,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 	/*
 	 * Check this lock's dependency list:
@@ -645,7 +647,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
@@ -684,7 +686,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
@@ -1079,7 +1081,8 @@ static int static_obj(void *obj)
 	 */
 	for_each_possible_cpu(i) {
 		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end   = (unsigned long) &__per_cpu_end   + per_cpu_offset(i);
+		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+					+ per_cpu_offset(i);
 
 		if ((addr >= start) && (addr < end))
 			return 1;
@@ -1174,7 +1177,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1246,7 +1249,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-	if (!subclass)
+	if (!subclass || force)
 		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
@@ -1934,7 +1937,7 @@ void trace_softirqs_off(unsigned long ip)
  * Initialize a lock instance's lock-class mapping info:
  */
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key)
+		      struct lock_class_key *key, int subclass)
 {
 	if (unlikely(!debug_locks))
 		return;
@@ -1954,6 +1957,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->name = name;
 	lock->key = key;
 	lock->class_cache = NULL;
+	if (subclass)
+		register_lock_class(lock, subclass, 1);
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1992,7 +1997,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * Not cached yet or subclass?
 	 */
 	if (unlikely(!class)) {
-		class = register_lock_class(lock, subclass);
+		class = register_lock_class(lock, subclass, 0);
 		if (!class)
 			return 0;
 	}
diff --git a/kernel/module.c b/kernel/module.c
index 67009bd56c..45e01cb601 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1086,22 +1086,35 @@ static int mod_sysfs_setup(struct module *mod,
 		goto out;
 	kobj_set_kset_s(&mod->mkobj, module_subsys);
 	mod->mkobj.mod = mod;
-	err = kobject_register(&mod->mkobj.kobj);
+
+	/* delay uevent until full sysfs population */
+	kobject_init(&mod->mkobj.kobj);
+	err = kobject_add(&mod->mkobj.kobj);
 	if (err)
 		goto out;
 
+	mod->drivers_dir = kobject_add_dir(&mod->mkobj.kobj, "drivers");
+	if (!mod->drivers_dir)
+		goto out_unreg;
+
 	err = module_param_sysfs_setup(mod, kparam, num_params);
 	if (err)
-		goto out_unreg;
+		goto out_unreg_drivers;
 
 	err = module_add_modinfo_attrs(mod);
 	if (err)
-		goto out_unreg;
+		goto out_unreg_param;
 
+	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
 	return 0;
 
+out_unreg_drivers:
+	kobject_unregister(mod->drivers_dir);
+out_unreg_param:
+	module_param_sysfs_remove(mod);
 out_unreg:
-	kobject_unregister(&mod->mkobj.kobj);
+	kobject_del(&mod->mkobj.kobj);
+	kobject_put(&mod->mkobj.kobj);
 out:
 	return err;
 }
@@ -1110,6 +1123,7 @@ static void mod_kobject_remove(struct module *mod)
 {
 	module_remove_modinfo_attrs(mod);
 	module_param_sysfs_remove(mod);
+	kobject_unregister(mod->drivers_dir);
 
 	kobject_unregister(&mod->mkobj.kobj);
 }
@@ -1342,7 +1356,7 @@ static void set_license(struct module *mod, const char *license)
 
 	if (!license_is_gpl_compatible(license)) {
 		if (!(tainted & TAINT_PROPRIETARY_MODULE))
-			printk(KERN_WARNING "%s: module license '%s' taints"
+			printk(KERN_WARNING "%s: module license '%s' taints "
 				"kernel.\n", mod->name, license);
 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 	}
@@ -1718,7 +1732,7 @@ static struct module *load_module(void __user *umod,
 	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
 	if (strcmp(mod->name, "ndiswrapper") == 0)
-		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
+		add_taint(TAINT_PROPRIETARY_MODULE);
 	if (strcmp(mod->name, "driverloader") == 0)
 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
 
@@ -2275,11 +2289,14 @@ void print_modules(void)
 
 void module_add_driver(struct module *mod, struct device_driver *drv)
 {
+	int no_warn;
+
 	if (!mod || !drv)
 		return;
 
-	/* Don't check return code; this call is idempotent */
-	sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
+	/* Don't check return codes; these calls are idempotent */
+	no_warn = sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
+	no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj, drv->name);
 }
 EXPORT_SYMBOL(module_add_driver);
 
@@ -2288,6 +2305,8 @@ void module_remove_driver(struct device_driver *drv)
 	if (!drv)
 		return;
 	sysfs_remove_link(&drv->kobj, "module");
+	if (drv->owner && drv->owner->drivers_dir)
+		sysfs_remove_link(drv->owner->drivers_dir, drv->name);
 }
 EXPORT_SYMBOL(module_remove_driver);
 
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index e3203c654d..18651641a7 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -91,7 +91,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key);
+	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 	lock->owner = NULL;
 	lock->magic = lock;
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 6ebdb82a0c..674aceb733 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -44,11 +44,9 @@ static inline struct nsproxy *clone_namespaces(struct nsproxy *orig)
 {
 	struct nsproxy *ns;
 
-	ns = kmalloc(sizeof(struct nsproxy), GFP_KERNEL);
-	if (ns) {
-		memcpy(ns, orig, sizeof(struct nsproxy));
+	ns = kmemdup(orig, sizeof(struct nsproxy), GFP_KERNEL);
+	if (ns)
 		atomic_set(&ns->count, 1);
-	}
 	return ns;
 }
 
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 479b16b44f..7c3e1e6dfb 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -88,6 +88,19 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
 }
 
 /*
+ * Divide and limit the result to res >= 1
+ *
+ * This is necessary to prevent signal delivery starvation, when the result of
+ * the division would be rounded down to 0.
+ */
+static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
+{
+	cputime_t res = cputime_div(time, div);
+
+	return max_t(cputime_t, res, 1);
+}
+
+/*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
@@ -483,8 +496,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		BUG();
 		break;
 	case CPUCLOCK_PROF:
-		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-				   nthreads);
+		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+				       nthreads);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(prof_ticks(t), left);
@@ -498,8 +511,8 @@ static void process_timer_rebalance(struct task_struct *p,
 		} while (t != p);
 		break;
 	case CPUCLOCK_VIRT:
-		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
-				   nthreads);
+		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
+				       nthreads);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(virt_ticks(t), left);
@@ -515,6 +528,7 @@ static void process_timer_rebalance(struct task_struct *p,
 	case CPUCLOCK_SCHED:
 		nsleft = expires.sched - val.sched;
 		do_div(nsleft, nthreads);
+		nsleft = max_t(unsigned long long, nsleft, 1);
 		do {
 			if (likely(!(t->flags & PF_EXITING))) {
 				ns = t->sched_time + nsleft;
@@ -1159,12 +1173,13 @@ static void check_process_timers(struct task_struct *tsk,
 
 		prof_left = cputime_sub(prof_expires, utime);
 		prof_left = cputime_sub(prof_left, stime);
-		prof_left = cputime_div(prof_left, nthreads);
+		prof_left = cputime_div_non_zero(prof_left, nthreads);
 		virt_left = cputime_sub(virt_expires, utime);
-		virt_left = cputime_div(virt_left, nthreads);
+		virt_left = cputime_div_non_zero(virt_left, nthreads);
 		if (sched_expires) {
 			sched_left = sched_expires - sched_time;
 			do_div(sched_left, nthreads);
+			sched_left = max_t(unsigned long long, sched_left, 1);
 		} else {
 			sched_left = 0;
 		}
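
To see why cputime_div_non_zero() matters here: the remaining expiry time is split evenly across the threads of a process, and with more threads than remaining ticks a plain division rounds to zero, so the per-thread expiry never advances and signal delivery starves. A stand-alone illustration of the clamping, assuming cputime_t behaves like an unsigned long of ticks (div_non_zero is a hypothetical mimic):

	#include <stdio.h>

	static unsigned long div_non_zero(unsigned long time, unsigned long div)
	{
		unsigned long res = time / div;

		return res ? res : 1;	/* same clamp as cputime_div_non_zero() */
	}

	int main(void)
	{
		/* 999 ticks left, split across 1000 threads */
		printf("plain: %lu ticks\n", 999UL / 1000UL);		/* 0: starvation */
		printf("clamped: %lu tick\n", div_non_zero(999, 1000));	/* 1 */
		return 0;
	}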
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d3a158a603..b1fb7866b0 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,7 +71,7 @@ static inline void platform_finish(void)
71 71
72static int prepare_processes(void) 72static int prepare_processes(void)
73{ 73{
74 int error; 74 int error = 0;
75 75
76 pm_prepare_console(); 76 pm_prepare_console();
77 77
@@ -84,6 +84,12 @@ static int prepare_processes(void)
 		goto thaw;
 	}
 
+	if (pm_disk_mode == PM_DISK_TESTPROC) {
+		printk("swsusp debug: Waiting for 5 seconds.\n");
+		mdelay(5000);
+		goto thaw;
+	}
+
 	/* Free memory before shutting down devices. */
 	if (!(error = swsusp_shrink_memory()))
 		return 0;
@@ -120,13 +126,21 @@ int pm_suspend_disk(void)
 	if (error)
 		return error;
 
+	if (pm_disk_mode == PM_DISK_TESTPROC)
+		goto Thaw;
+
 	suspend_console();
 	error = device_suspend(PMSG_FREEZE);
 	if (error) {
 		resume_console();
 		printk("Some devices failed to suspend\n");
-		unprepare_processes();
-		return error;
+		goto Thaw;
+	}
+
+	if (pm_disk_mode == PM_DISK_TEST) {
+		printk("swsusp debug: Waiting for 5 seconds.\n");
+		mdelay(5000);
+		goto Done;
 	}
 
 	pr_debug("PM: snapshotting memory.\n");
@@ -143,16 +157,17 @@ int pm_suspend_disk(void)
 			power_down(pm_disk_mode);
 		else {
 			swsusp_free();
-			unprepare_processes();
-			return error;
+			goto Thaw;
 		}
-	} else
+	} else {
 		pr_debug("PM: Image restored successfully.\n");
+	}
 
 	swsusp_free();
  Done:
 	device_resume();
 	resume_console();
+ Thaw:
 	unprepare_processes();
 	return error;
 }
@@ -249,6 +264,8 @@ static const char * const pm_disk_modes[] = {
 	[PM_DISK_PLATFORM]	= "platform",
 	[PM_DISK_SHUTDOWN]	= "shutdown",
 	[PM_DISK_REBOOT]	= "reboot",
+	[PM_DISK_TEST]		= "test",
+	[PM_DISK_TESTPROC]	= "testproc",
 };
 
 /**
@@ -303,17 +320,19 @@ static ssize_t disk_store(struct subsystem * s, const char * buf, size_t n)
 		}
 	}
 	if (mode) {
-		if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT)
+		if (mode == PM_DISK_SHUTDOWN || mode == PM_DISK_REBOOT ||
+		     mode == PM_DISK_TEST || mode == PM_DISK_TESTPROC) {
 			pm_disk_mode = mode;
-		else {
+		} else {
 			if (pm_ops && pm_ops->enter &&
 			    (mode == pm_ops->pm_disk_mode))
 				pm_disk_mode = mode;
 			else
 				error = -EINVAL;
 		}
-	} else
+	} else {
 		error = -EINVAL;
+	}
 
 	pr_debug("PM: suspend-to-disk mode set to '%s'\n",
 		 pm_disk_modes[mode]);
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 9b2ee5344d..1a3b0dd2c3 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -425,7 +425,8 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 		bio_set_pages_dirty(bio);
 		bio_put(bio);
 	} else {
-		get_page(page);
+		if (rw == READ)
+			get_page(page);	/* These pages are freed later */
 		bio->bi_private = *bio_chain;
 		*bio_chain = bio;
 		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
diff --git a/kernel/printk.c b/kernel/printk.c
index f7d427ef50..66426552fb 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -31,6 +31,7 @@
 #include <linux/security.h>
 #include <linux/bootmem.h>
 #include <linux/syscalls.h>
+#include <linux/jiffies.h>
 
 #include <asm/uaccess.h>
 
@@ -1101,3 +1102,23 @@ int printk_ratelimit(void)
 			printk_ratelimit_burst);
 }
 EXPORT_SYMBOL(printk_ratelimit);
+
+/**
+ * printk_timed_ratelimit - caller-controlled printk ratelimiting
+ * @caller_jiffies: pointer to caller's state
+ * @interval_msecs: minimum interval between prints
+ *
+ * printk_timed_ratelimit() returns true if more than @interval_msecs
+ * milliseconds have elapsed since the last time printk_timed_ratelimit()
+ * returned true.
+ */
+bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+			unsigned int interval_msecs)
+{
+	if (*caller_jiffies == 0 || time_after(jiffies, *caller_jiffies)) {
+		*caller_jiffies = jiffies + msecs_to_jiffies(interval_msecs);
+		return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(printk_timed_ratelimit);
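
Unlike printk_ratelimit(), printk_timed_ratelimit() keeps its state in a caller-supplied jiffies variable, so every call site gets an independent rate limit; the futex.c hunk above is the first in-tree user. A minimal usage sketch (last_complaint and my_warn are hypothetical names):

	#include <linux/kernel.h>

	static unsigned long last_complaint;	/* 0 means "never printed" */

	static void my_warn(void)
	{
		/* prints at most once per 60 seconds from this call site */
		if (printk_timed_ratelimit(&last_complaint, 60 * 1000))
			printk(KERN_WARNING "something worth mentioning happened\n");
	}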
diff --git a/kernel/sched.c b/kernel/sched.c
index 094b5687ee..3399701c68 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -160,15 +160,6 @@
 #define TASK_PREEMPTS_CURR(p, rq) \
 	((p)->prio < (rq)->curr->prio)
 
-/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
- */
-
 #define SCALE_PRIO(x, prio) \
 	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 
@@ -180,6 +171,15 @@ static unsigned int static_prio_timeslice(int static_prio)
 	return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
 
+/*
+ * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
+ * to time slice values: [800ms ... 100ms ... 5ms]
+ *
+ * The higher a thread's priority, the bigger timeslices
+ * it gets during one round of execution. But even the lowest
+ * priority thread gets MIN_TIMESLICE worth of execution time.
+ */
+
 static inline unsigned int task_timeslice(struct task_struct *p)
 {
 	return static_prio_timeslice(p->static_prio);
diff --git a/kernel/signal.c b/kernel/signal.c
index 7ed8d5304b..df18c167a2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -267,18 +267,25 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
 					 int override_rlimit)
 {
 	struct sigqueue *q = NULL;
+	struct user_struct *user;
 
-	atomic_inc(&t->user->sigpending);
+	/*
+	 * In order to avoid problems with "switch_user()", we want to make
+	 * sure that the compiler doesn't re-load "t->user"
+	 */
+	user = t->user;
+	barrier();
+	atomic_inc(&user->sigpending);
 	if (override_rlimit ||
-	    atomic_read(&t->user->sigpending) <=
+	    atomic_read(&user->sigpending) <=
 			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, flags);
 	if (unlikely(q == NULL)) {
-		atomic_dec(&t->user->sigpending);
+		atomic_dec(&user->sigpending);
 	} else {
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
-		q->user = get_uid(t->user);
+		q->user = get_uid(user);
 	}
 	return(q);
 }
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 476c374151..2c6c2bf855 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -293,6 +293,27 @@ void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
 }
 
 EXPORT_SYMBOL(_spin_lock_nested);
+unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	preempt_disable();
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	/*
+	 * On lockdep we dont want the hand-coded irq-enable of
+	 * _raw_spin_lock_flags() code, because lockdep assumes
+	 * that interrupts are not re-enabled during lock-acquire:
+	 */
+#ifdef CONFIG_PROVE_SPIN_LOCKING
+	_raw_spin_lock(lock);
+#else
+	_raw_spin_lock_flags(lock, &flags);
+#endif
+	return flags;
+}
+
+EXPORT_SYMBOL(_spin_lock_irqsave_nested);
 
 #endif
 
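
_spin_lock_irqsave_nested() backs a spin_lock_irqsave_nested(lock, flags, subclass) annotation (assumed here to be added to <linux/spinlock.h> alongside this change), which tells lockdep that taking two locks of the same lock class is intentional rather than a recursive deadlock. A hedged sketch under that assumption (struct node and transfer() are hypothetical, and the locks are presumed already initialized):

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct node {			/* hypothetical object */
		spinlock_t lock;
	};

	/* Take two locks of the same class in a fixed order; the _nested
	 * annotation keeps lockdep from flagging the second acquisition. */
	static void transfer(struct node *from, struct node *to)
	{
		unsigned long f1, f2;

		spin_lock_irqsave(&from->lock, f1);
		spin_lock_irqsave_nested(&to->lock, f2, SINGLE_DEPTH_NESTING);
		/* ... move state from 'from' to 'to' ... */
		spin_unlock_irqrestore(&to->lock, f2);
		spin_unlock_irqrestore(&from->lock, f1);
	}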
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 7a3b2e75f0..d7306d0f3d 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -49,6 +49,7 @@ cond_syscall(compat_sys_get_robust_list);
 cond_syscall(sys_epoll_create);
 cond_syscall(sys_epoll_ctl);
 cond_syscall(sys_epoll_wait);
+cond_syscall(sys_epoll_pwait);
 cond_syscall(sys_semget);
 cond_syscall(sys_semop);
 cond_syscall(sys_semtimedop);
@@ -134,6 +135,7 @@ cond_syscall(sys_madvise);
 cond_syscall(sys_mremap);
 cond_syscall(sys_remap_file_pages);
 cond_syscall(compat_sys_move_pages);
+cond_syscall(compat_sys_migrate_pages);
 
 /* block-layer dependent */
 cond_syscall(sys_bdflush);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8020fb273c..09e569f479 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
 static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,
 		  void __user *buffer, size_t *lenp, loff_t *ppos);
 
+#ifdef CONFIG_PROC_SYSCTL
 static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
 		  void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
 
 static ctl_table root_table[];
 static struct ctl_table_header root_table_header =
@@ -542,6 +544,7 @@ static ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_PROC_SYSCTL
 	{
 		.ctl_name	= KERN_CADPID,
 		.procname	= "cad_pid",
@@ -550,6 +553,7 @@ static ctl_table kern_table[] = {
 		.mode		= 0600,
 		.proc_handler	= &proc_do_cad_pid,
 	},
+#endif
 	{
 		.ctl_name	= KERN_MAX_THREADS,
 		.procname	= "threads-max",
@@ -1311,7 +1315,9 @@ repeat:
 		return -ENOTDIR;
 	if (get_user(n, name))
 		return -EFAULT;
-	for ( ; table->ctl_name; table++) {
+	for ( ; table->ctl_name || table->procname; table++) {
+		if (!table->ctl_name)
+			continue;
 		if (n == table->ctl_name || table->ctl_name == CTL_ANY) {
 			int error;
 			if (table->child) {
@@ -1528,7 +1534,7 @@ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root,
 	int len;
 	mode_t mode;
 
-	for (; table->ctl_name; table++) {
+	for (; table->ctl_name || table->procname; table++) {
 		/* Can't do anything without a proc name. */
 		if (!table->procname)
 			continue;
@@ -1575,7 +1581,7 @@ static void register_proc_table(ctl_table * table, struct proc_dir_entry *root,
 static void unregister_proc_table(ctl_table * table, struct proc_dir_entry *root)
 {
 	struct proc_dir_entry *de;
-	for (; table->ctl_name; table++) {
+	for (; table->ctl_name || table->procname; table++) {
 		if (!(de = table->de))
 			continue;
 		if (de->mode & S_IFDIR) {
@@ -2676,13 +2682,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
 asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
 {
 	static int msg_count;
+	struct __sysctl_args tmp;
+	int name[CTL_MAXNAME];
+	int i;
+
+	/* Read in the sysctl name for better debug message logging */
+	if (copy_from_user(&tmp, args, sizeof(tmp)))
+		return -EFAULT;
+	if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME)
+		return -ENOTDIR;
+	for (i = 0; i < tmp.nlen; i++)
+		if (get_user(name[i], tmp.name + i))
+			return -EFAULT;
+
+	/* Ignore accesses to kernel.version */
+	if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
+		goto out;
 
 	if (msg_count < 5) {
 		msg_count++;
 		printk(KERN_INFO
 			"warning: process `%s' used the removed sysctl "
-			"system call\n", current->comm);
+			"system call with ", current->comm);
+		for (i = 0; i < tmp.nlen; i++)
+			printk("%d.", name[i]);
+		printk("\n");
 	}
+out:
 	return -ENOSYS;
 }
 
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 5d6a8c54ee..d3d28919d4 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -77,7 +77,7 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
77 /* 77 /*
78 * If new attributes are added, please revisit this allocation 78 * If new attributes are added, please revisit this allocation
79 */ 79 */
80 skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL); 80 skb = genlmsg_new(size, GFP_KERNEL);
81 if (!skb) 81 if (!skb)
82 return -ENOMEM; 82 return -ENOMEM;
83 83
@@ -85,13 +85,9 @@ static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
 		int seq = get_cpu_var(taskstats_seqnum)++;
 		put_cpu_var(taskstats_seqnum);
 
-		reply = genlmsg_put(skb, 0, seq,
-				family.id, 0, 0,
-				cmd, family.version);
+		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
 	} else
-		reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
-				family.id, 0, 0,
-				cmd, family.version);
+		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
 	if (reply == NULL) {
 		nlmsg_free(skb);
 		return -EINVAL;
@@ -174,21 +170,19 @@ static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
 	up_write(&listeners->sem);
 }
 
-static int fill_pid(pid_t pid, struct task_struct *pidtsk,
+static int fill_pid(pid_t pid, struct task_struct *tsk,
 		struct taskstats *stats)
 {
 	int rc = 0;
-	struct task_struct *tsk = pidtsk;
 
-	if (!pidtsk) {
-		read_lock(&tasklist_lock);
+	if (!tsk) {
+		rcu_read_lock();
 		tsk = find_task_by_pid(pid);
-		if (!tsk) {
-			read_unlock(&tasklist_lock);
+		if (tsk)
+			get_task_struct(tsk);
+		rcu_read_unlock();
+		if (!tsk)
 			return -ESRCH;
-		}
-		get_task_struct(tsk);
-		read_unlock(&tasklist_lock);
 	} else
 		get_task_struct(tsk);
 
@@ -214,39 +208,30 @@ static int fill_pid(pid_t pid, struct task_struct *pidtsk,
 
 }
 
-static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
+static int fill_tgid(pid_t tgid, struct task_struct *first,
 		struct taskstats *stats)
 {
-	struct task_struct *tsk, *first;
+	struct task_struct *tsk;
 	unsigned long flags;
+	int rc = -ESRCH;
 
 	/*
 	 * Add additional stats from live tasks except zombie thread group
 	 * leaders who are already counted with the dead tasks
 	 */
-	first = tgidtsk;
-	if (!first) {
-		read_lock(&tasklist_lock);
+	rcu_read_lock();
+	if (!first)
 		first = find_task_by_pid(tgid);
-		if (!first) {
-			read_unlock(&tasklist_lock);
-			return -ESRCH;
-		}
-		get_task_struct(first);
-		read_unlock(&tasklist_lock);
-	} else
-		get_task_struct(first);
 
-	/* Start with stats from dead tasks */
-	spin_lock_irqsave(&first->signal->stats_lock, flags);
+	if (!first || !lock_task_sighand(first, &flags))
+		goto out;
+
 	if (first->signal->stats)
 		memcpy(stats, first->signal->stats, sizeof(*stats));
-	spin_unlock_irqrestore(&first->signal->stats_lock, flags);
 
 	tsk = first;
-	read_lock(&tasklist_lock);
 	do {
-		if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
+		if (tsk->exit_state)
 			continue;
 		/*
 		 * Accounting subsystem can call its functions here to
@@ -257,15 +242,18 @@ static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
 		delayacct_add_tsk(stats, tsk);
 
 	} while_each_thread(first, tsk);
-	read_unlock(&tasklist_lock);
-	stats->version = TASKSTATS_VERSION;
 
+	unlock_task_sighand(first, &flags);
+	rc = 0;
+out:
+	rcu_read_unlock();
+
+	stats->version = TASKSTATS_VERSION;
 	/*
 	 * Accounting subsytems can also add calls here to modify
 	 * fields of taskstats.
 	 */
-
-	return 0;
+	return rc;
 }
 
 
@@ -273,7 +261,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
+	spin_lock_irqsave(&tsk->sighand->siglock, flags);
 	if (!tsk->signal->stats)
 		goto ret;
 
@@ -285,7 +273,7 @@ static void fill_tgid_exit(struct task_struct *tsk)
 	 */
 	delayacct_add_tsk(tsk->signal->stats, tsk);
 ret:
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
+	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
 	return;
 }
 
@@ -419,7 +407,7 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
 	return send_reply(rep_skb, info->snd_pid);
 
 nla_put_failure:
-	return genlmsg_cancel(rep_skb, reply);
+	rc = genlmsg_cancel(rep_skb, reply);
 err:
 	nlmsg_free(rep_skb);
 	return rc;
@@ -461,24 +449,26 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
 	size_t size;
 	int is_thread_group;
 	struct nlattr *na;
-	unsigned long flags;
 
-	if (!family_registered || !tidstats)
+	if (!family_registered)
 		return;
 
-	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
-	is_thread_group = tsk->signal->stats ? 1 : 0;
-	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
-
-	rc = 0;
 	/*
 	 * Size includes space for nested attributes
 	 */
 	size = nla_total_size(sizeof(u32)) +
 		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
 
-	if (is_thread_group)
-		size = 2 * size;	/* PID + STATS + TGID + STATS */
+	is_thread_group = (tsk->signal->stats != NULL);
+	if (is_thread_group) {
+		/* PID + STATS + TGID + STATS */
+		size = 2 * size;
+		/* fill the tsk->signal->stats structure */
+		fill_tgid_exit(tsk);
+	}
+
+	if (!tidstats)
+		return;
 
 	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
 	if (rc < 0)
@@ -498,11 +488,8 @@ void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
498 goto send; 488 goto send;
499 489
500 /* 490 /*
501 * tsk has/had a thread group so fill the tsk->signal->stats structure
502 * Doesn't matter if tsk is the leader or the last group member leaving 491 * Doesn't matter if tsk is the leader or the last group member leaving
503 */ 492 */
504
505 fill_tgid_exit(tsk);
506 if (!group_dead) 493 if (!group_dead)
507 goto send; 494 goto send;
508 495
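Note on this hunk: the reordering matters when taskstats_exit_send() runs with a NULL tidstats (no listener buffer was allocated): fill_tgid_exit() must still fold the exiting thread's delay accounting into tsk->signal->stats before the early return, otherwise per-group totals silently lose that thread. Condensed control flow after the change (sketch of the lines above, not verbatim):

	if (!family_registered)
		return;

	size = nla_total_size(sizeof(u32)) +
	       nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	is_thread_group = (tsk->signal->stats != NULL);
	if (is_thread_group) {
		size = 2 * size;	/* PID + STATS + TGID + STATS */
		fill_tgid_exit(tsk);	/* account this thread unconditionally */
	}

	if (!tidstats)			/* nothing to send, but the group     */
		return;			/* totals above were still updated    */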
@@ -519,7 +506,6 @@ send:
519 506
520nla_put_failure: 507nla_put_failure:
521 genlmsg_cancel(rep_skb, reply); 508 genlmsg_cancel(rep_skb, reply);
522 goto ret;
523err_skb: 509err_skb:
524 nlmsg_free(rep_skb); 510 nlmsg_free(rep_skb);
525ret: 511ret:
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 126bb30c4a..a99b2a6e6a 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -57,7 +57,7 @@ static cycle_t jiffies_read(void)
57 57
58struct clocksource clocksource_jiffies = { 58struct clocksource clocksource_jiffies = {
59 .name = "jiffies", 59 .name = "jiffies",
60 .rating = 0, /* lowest rating*/ 60 .rating = 1, /* lowest valid rating*/
61 .read = jiffies_read, 61 .read = jiffies_read,
62 .mask = 0xffffffff, /*32bits*/ 62 .mask = 0xffffffff, /*32bits*/
63 .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ 63 .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
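Note on this hunk: clocksource ratings are ordinal; the timekeeping core picks the highest-rated registered source, and per the convention documented in clocksource.h the 1-99 band means "works, but use only when nothing better exists", so 1, not 0, is the honest floor for jiffies. Real timer hardware outranks it simply by registering with a higher rating; a minimal sketch (read_acme() is a hypothetical counter read, values illustrative):

static cycle_t read_acme(void)
{
	return 0;	/* stand-in for a hardware counter read */
}

static struct clocksource clocksource_acme = {
	.name	= "acme",
	.rating	= 200,			/* "good": trumps jiffies' 1 */
	.read	= read_acme,
	.mask	= CLOCKSOURCE_MASK(32),
	.shift	= 20,			/* .mult computed from the clock rate */
};

/* clocksource_register(&clocksource_acme) makes it eligible; selection
 * then prefers it over clocksource_jiffies automatically. */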
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 47195fa0ec..3afeaa3a73 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -161,9 +161,9 @@ void second_overflow(void)
161 time_adjust += MAX_TICKADJ; 161 time_adjust += MAX_TICKADJ;
162 tick_length -= MAX_TICKADJ_SCALED; 162 tick_length -= MAX_TICKADJ_SCALED;
163 } else { 163 } else {
164 time_adjust = 0;
165 tick_length += (s64)(time_adjust * NSEC_PER_USEC / 164 tick_length += (s64)(time_adjust * NSEC_PER_USEC /
166 HZ) << TICK_LENGTH_SHIFT; 165 HZ) << TICK_LENGTH_SHIFT;
166 time_adjust = 0;
167 } 167 }
168 } 168 }
169} 169}
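Note on this hunk: the else-branch handles the final, sub-MAX_TICKADJ remainder of an adjtime() offset. The old order zeroed time_adjust first and then multiplied it into tick_length, so the tail of every adjustment was computed as zero and dropped; consuming the value before clearing it fixes that. Distilled (hypothetical variables):

	long remaining_usec = 7;	/* last slice of a pending adjustment */
	s64  applied = 0;

	/* old order: remaining_usec = 0; applied += remaining_usec * ...;
	 * adds nothing, so the 7 usec vanish. Fixed order: */
	applied += (s64)remaining_usec * NSEC_PER_USEC;	/* fold the tail in */
	remaining_usec = 0;				/* then mark it done */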
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index db443221ba..96f77013d3 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -36,7 +36,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
36 36
37 /* calculate task elapsed time in timespec */ 37 /* calculate task elapsed time in timespec */
38 do_posix_clock_monotonic_gettime(&uptime); 38 do_posix_clock_monotonic_gettime(&uptime);
39 ts = timespec_sub(uptime, current->group_leader->start_time); 39 ts = timespec_sub(uptime, tsk->start_time);
40 /* rebase elapsed time to usec */ 40 /* rebase elapsed time to usec */
41 ac_etime = timespec_to_ns(&ts); 41 ac_etime = timespec_to_ns(&ts);
42 do_div(ac_etime, NSEC_PER_USEC); 42 do_div(ac_etime, NSEC_PER_USEC);
@@ -58,7 +58,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
58 stats->ac_uid = tsk->uid; 58 stats->ac_uid = tsk->uid;
59 stats->ac_gid = tsk->gid; 59 stats->ac_gid = tsk->gid;
60 stats->ac_pid = tsk->pid; 60 stats->ac_pid = tsk->pid;
61 stats->ac_ppid = (tsk->parent) ? tsk->parent->pid : 0; 61 rcu_read_lock();
62 stats->ac_ppid = pid_alive(tsk) ?
63 rcu_dereference(tsk->real_parent)->tgid : 0;
64 rcu_read_unlock();
62 stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; 65 stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
63 stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; 66 stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
64 stats->ac_minflt = tsk->min_flt; 67 stats->ac_minflt = tsk->min_flt;
@@ -77,13 +80,17 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
77 */ 80 */
78void xacct_add_tsk(struct taskstats *stats, struct task_struct *p) 81void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
79{ 82{
83 struct mm_struct *mm;
84
80 /* convert pages-jiffies to Mbyte-usec */ 85 /* convert pages-jiffies to Mbyte-usec */
81 stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB; 86 stats->coremem = jiffies_to_usecs(p->acct_rss_mem1) * PAGE_SIZE / MB;
82 stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB; 87 stats->virtmem = jiffies_to_usecs(p->acct_vm_mem1) * PAGE_SIZE / MB;
83 if (p->mm) { 88 mm = get_task_mm(p);
89 if (mm) {
84 /* adjust to KB unit */ 90 /* adjust to KB unit */
85 stats->hiwater_rss = p->mm->hiwater_rss * PAGE_SIZE / KB; 91 stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB;
86 stats->hiwater_vm = p->mm->hiwater_vm * PAGE_SIZE / KB; 92 stats->hiwater_vm = mm->hiwater_vm * PAGE_SIZE / KB;
93 mmput(mm);
87 } 94 }
88 stats->read_char = p->rchar; 95 stats->read_char = p->rchar;
89 stats->write_char = p->wchar; 96 stats->write_char = p->wchar;
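Note on these hunks: both replace raw dereferences of another task's fields with guarded idioms. get_task_mm() takes task_lock(), returns NULL for kernel threads, and pins the mm with a reference that mmput() drops, while pid_alive() plus rcu_dereference(tsk->real_parent) is the safe way to read a parent tgid for a task that may be exiting or reparented. The mm side as a sketch:

	struct mm_struct *mm = get_task_mm(p);	/* NULL for kernel threads;
						 * otherwise a pinned reference */
	if (mm) {
		stats->hiwater_rss = mm->hiwater_rss * PAGE_SIZE / KB;
		/* mm cannot be freed or reused while we hold the reference */
		mmput(mm);			/* drop the pin */
	}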
diff --git a/kernel/unwind.c b/kernel/unwind.c
index 2e2368607a..ed0a21d4a9 100644
--- a/kernel/unwind.c
+++ b/kernel/unwind.c
@@ -11,13 +11,15 @@
11 11
12#include <linux/unwind.h> 12#include <linux/unwind.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/delay.h> 14#include <linux/bootmem.h>
15#include <linux/sort.h>
15#include <linux/stop_machine.h> 16#include <linux/stop_machine.h>
16#include <asm/sections.h> 17#include <asm/sections.h>
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18#include <asm/unaligned.h> 19#include <asm/unaligned.h>
19 20
20extern char __start_unwind[], __end_unwind[]; 21extern char __start_unwind[], __end_unwind[];
22extern const u8 __start_unwind_hdr[], __end_unwind_hdr[];
21 23
22#define MAX_STACK_DEPTH 8 24#define MAX_STACK_DEPTH 8
23 25
@@ -100,6 +102,8 @@ static struct unwind_table {
100 } core, init; 102 } core, init;
101 const void *address; 103 const void *address;
102 unsigned long size; 104 unsigned long size;
105 const unsigned char *header;
106 unsigned long hdrsz;
103 struct unwind_table *link; 107 struct unwind_table *link;
104 const char *name; 108 const char *name;
105} root_table; 109} root_table;
@@ -145,6 +149,10 @@ static struct unwind_table *find_table(unsigned long pc)
145 return table; 149 return table;
146} 150}
147 151
152static unsigned long read_pointer(const u8 **pLoc,
153 const void *end,
154 signed ptrType);
155
148static void init_unwind_table(struct unwind_table *table, 156static void init_unwind_table(struct unwind_table *table,
149 const char *name, 157 const char *name,
150 const void *core_start, 158 const void *core_start,
@@ -152,14 +160,30 @@ static void init_unwind_table(struct unwind_table *table,
152 const void *init_start, 160 const void *init_start,
153 unsigned long init_size, 161 unsigned long init_size,
154 const void *table_start, 162 const void *table_start,
155 unsigned long table_size) 163 unsigned long table_size,
164 const u8 *header_start,
165 unsigned long header_size)
156{ 166{
167 const u8 *ptr = header_start + 4;
168 const u8 *end = header_start + header_size;
169
157 table->core.pc = (unsigned long)core_start; 170 table->core.pc = (unsigned long)core_start;
158 table->core.range = core_size; 171 table->core.range = core_size;
159 table->init.pc = (unsigned long)init_start; 172 table->init.pc = (unsigned long)init_start;
160 table->init.range = init_size; 173 table->init.range = init_size;
161 table->address = table_start; 174 table->address = table_start;
162 table->size = table_size; 175 table->size = table_size;
176 /* See if the linker provided table looks valid. */
177 if (header_size <= 4
178 || header_start[0] != 1
179 || (void *)read_pointer(&ptr, end, header_start[1]) != table_start
180 || header_start[2] == DW_EH_PE_omit
181 || read_pointer(&ptr, end, header_start[2]) <= 0
182 || header_start[3] == DW_EH_PE_omit)
183 header_start = NULL;
184 table->hdrsz = header_size;
185 smp_wmb();
186 table->header = header_start;
163 table->link = NULL; 187 table->link = NULL;
164 table->name = name; 188 table->name = name;
165} 189}
@@ -169,7 +193,143 @@ void __init unwind_init(void)
169 init_unwind_table(&root_table, "kernel", 193 init_unwind_table(&root_table, "kernel",
170 _text, _end - _text, 194 _text, _end - _text,
171 NULL, 0, 195 NULL, 0,
172 __start_unwind, __end_unwind - __start_unwind); 196 __start_unwind, __end_unwind - __start_unwind,
197 __start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);
198}
199
200static const u32 bad_cie, not_fde;
201static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *);
202static signed fde_pointer_type(const u32 *cie);
203
204struct eh_frame_hdr_table_entry {
205 unsigned long start, fde;
206};
207
208static int cmp_eh_frame_hdr_table_entries(const void *p1, const void *p2)
209{
210 const struct eh_frame_hdr_table_entry *e1 = p1;
211 const struct eh_frame_hdr_table_entry *e2 = p2;
212
213 return (e1->start > e2->start) - (e1->start < e2->start);
214}
215
216static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
217{
218 struct eh_frame_hdr_table_entry *e1 = p1;
219 struct eh_frame_hdr_table_entry *e2 = p2;
220 unsigned long v;
221
222 v = e1->start;
223 e1->start = e2->start;
224 e2->start = v;
225 v = e1->fde;
226 e1->fde = e2->fde;
227 e2->fde = v;
228}
229
230static void __init setup_unwind_table(struct unwind_table *table,
231 void *(*alloc)(unsigned long))
232{
233 const u8 *ptr;
234 unsigned long tableSize = table->size, hdrSize;
235 unsigned n;
236 const u32 *fde;
237 struct {
238 u8 version;
239 u8 eh_frame_ptr_enc;
240 u8 fde_count_enc;
241 u8 table_enc;
242 unsigned long eh_frame_ptr;
243 unsigned int fde_count;
244 struct eh_frame_hdr_table_entry table[];
245 } __attribute__((__packed__)) *header;
246
247 if (table->header)
248 return;
249
250 if (table->hdrsz)
251 printk(KERN_WARNING ".eh_frame_hdr for '%s' present but unusable\n",
252 table->name);
253
254 if (tableSize & (sizeof(*fde) - 1))
255 return;
256
257 for (fde = table->address, n = 0;
258 tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde;
259 tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
260 const u32 *cie = cie_for_fde(fde, table);
261 signed ptrType;
262
263 if (cie == &not_fde)
264 continue;
265 if (cie == NULL
266 || cie == &bad_cie
267 || (ptrType = fde_pointer_type(cie)) < 0)
268 return;
269 ptr = (const u8 *)(fde + 2);
270 if (!read_pointer(&ptr,
271 (const u8 *)(fde + 1) + *fde,
272 ptrType))
273 return;
274 ++n;
275 }
276
277 if (tableSize || !n)
278 return;
279
280 hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
281 + 2 * n * sizeof(unsigned long);
282 header = alloc(hdrSize);
283 if (!header)
284 return;
285 header->version = 1;
286 header->eh_frame_ptr_enc = DW_EH_PE_abs|DW_EH_PE_native;
287 header->fde_count_enc = DW_EH_PE_abs|DW_EH_PE_data4;
288 header->table_enc = DW_EH_PE_abs|DW_EH_PE_native;
289 put_unaligned((unsigned long)table->address, &header->eh_frame_ptr);
290 BUILD_BUG_ON(offsetof(typeof(*header), fde_count)
291 % __alignof(typeof(header->fde_count)));
292 header->fde_count = n;
293
294 BUILD_BUG_ON(offsetof(typeof(*header), table)
295 % __alignof(typeof(*header->table)));
296 for (fde = table->address, tableSize = table->size, n = 0;
297 tableSize;
298 tableSize -= sizeof(*fde) + *fde, fde += 1 + *fde / sizeof(*fde)) {
299 const u32 *cie = fde + 1 - fde[1] / sizeof(*fde);
300
301 if (!fde[1])
302 continue; /* this is a CIE */
303 ptr = (const u8 *)(fde + 2);
304 header->table[n].start = read_pointer(&ptr,
305 (const u8 *)(fde + 1) + *fde,
306 fde_pointer_type(cie));
307 header->table[n].fde = (unsigned long)fde;
308 ++n;
309 }
310 WARN_ON(n != header->fde_count);
311
312 sort(header->table,
313 n,
314 sizeof(*header->table),
315 cmp_eh_frame_hdr_table_entries,
316 swap_eh_frame_hdr_table_entries);
317
318 table->hdrsz = hdrSize;
319 smp_wmb();
320 table->header = (const void *)header;
321}
322
323static void *__init balloc(unsigned long sz)
324{
325 return __alloc_bootmem_nopanic(sz,
326 sizeof(unsigned int),
327 __pa(MAX_DMA_ADDRESS));
328}
329
330void __init unwind_setup(void)
331{
332 setup_unwind_table(&root_table, balloc);
173} 333}
174 334
175#ifdef CONFIG_MODULES 335#ifdef CONFIG_MODULES
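Note on this hunk: setup_unwind_table() builds at boot the same sorted lookup table a new-enough linker would have emitted as .eh_frame_hdr: one validation pass counts the FDEs, a second fills {start, fde} pairs, and sort() orders them by start address. The comparator's (a > b) - (a < b) form is deliberate; a subtraction-based compare wraps for unsigned longs that are far apart and misorders the table. Illustration (cmp_sub()/cmp_safe() are made-up names):

static int cmp_sub(unsigned long a, unsigned long b)
{
	return (int)(a - b);		/* WRONG: wraps modulo 2^BITS */
}

static int cmp_safe(unsigned long a, unsigned long b)
{
	return (a > b) - (a < b);	/* always exactly -1, 0 or 1 */
}

/* e.g. a = 0, b = ULONG_MAX: a < b, yet (int)(a - b) == 1,
 * so cmp_sub() claims a > b and the sort goes wrong. */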
@@ -193,7 +353,8 @@ void *unwind_add_table(struct module *module,
193 init_unwind_table(table, module->name, 353 init_unwind_table(table, module->name,
194 module->module_core, module->core_size, 354 module->module_core, module->core_size,
195 module->module_init, module->init_size, 355 module->module_init, module->init_size,
196 table_start, table_size); 356 table_start, table_size,
357 NULL, 0);
197 358
198 if (last_table) 359 if (last_table)
199 last_table->link = table; 360 last_table->link = table;
@@ -303,6 +464,26 @@ static sleb128_t get_sleb128(const u8 **pcur, const u8 *end)
303 return value; 464 return value;
304} 465}
305 466
467static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
468{
469 const u32 *cie;
470
471 if (!*fde || (*fde & (sizeof(*fde) - 1)))
472 return &bad_cie;
473 if (!fde[1])
474 return &not_fde; /* this is a CIE */
475 if ((fde[1] & (sizeof(*fde) - 1))
476 || fde[1] > (unsigned long)(fde + 1) - (unsigned long)table->address)
477 return NULL; /* this is not a valid FDE */
478 cie = fde + 1 - fde[1] / sizeof(*fde);
479 if (*cie <= sizeof(*cie) + 4
480 || *cie >= fde[1] - sizeof(*fde)
481 || (*cie & (sizeof(*cie) - 1))
482 || cie[1])
483 return NULL; /* this is not a (valid) CIE */
484 return cie;
485}
486
306static unsigned long read_pointer(const u8 **pLoc, 487static unsigned long read_pointer(const u8 **pLoc,
307 const void *end, 488 const void *end,
308 signed ptrType) 489 signed ptrType)
@@ -610,49 +791,108 @@ int unwind(struct unwind_frame_info *frame)
610 unsigned i; 791 unsigned i;
611 signed ptrType = -1; 792 signed ptrType = -1;
612 uleb128_t retAddrReg = 0; 793 uleb128_t retAddrReg = 0;
613 struct unwind_table *table; 794 const struct unwind_table *table;
614 struct unwind_state state; 795 struct unwind_state state;
615 796
616 if (UNW_PC(frame) == 0) 797 if (UNW_PC(frame) == 0)
617 return -EINVAL; 798 return -EINVAL;
618 if ((table = find_table(pc)) != NULL 799 if ((table = find_table(pc)) != NULL
619 && !(table->size & (sizeof(*fde) - 1))) { 800 && !(table->size & (sizeof(*fde) - 1))) {
620 unsigned long tableSize = table->size; 801 const u8 *hdr = table->header;
621 802 unsigned long tableSize;
622 for (fde = table->address; 803
623 tableSize > sizeof(*fde) && tableSize - sizeof(*fde) >= *fde; 804 smp_rmb();
624 tableSize -= sizeof(*fde) + *fde, 805 if (hdr && hdr[0] == 1) {
625 fde += 1 + *fde / sizeof(*fde)) { 806 switch(hdr[3] & DW_EH_PE_FORM) {
626 if (!*fde || (*fde & (sizeof(*fde) - 1))) 807 case DW_EH_PE_native: tableSize = sizeof(unsigned long); break;
627 break; 808 case DW_EH_PE_data2: tableSize = 2; break;
628 if (!fde[1]) 809 case DW_EH_PE_data4: tableSize = 4; break;
629 continue; /* this is a CIE */ 810 case DW_EH_PE_data8: tableSize = 8; break;
630 if ((fde[1] & (sizeof(*fde) - 1)) 811 default: tableSize = 0; break;
631 || fde[1] > (unsigned long)(fde + 1)
632 - (unsigned long)table->address)
633 continue; /* this is not a valid FDE */
634 cie = fde + 1 - fde[1] / sizeof(*fde);
635 if (*cie <= sizeof(*cie) + 4
636 || *cie >= fde[1] - sizeof(*fde)
637 || (*cie & (sizeof(*cie) - 1))
638 || cie[1]
639 || (ptrType = fde_pointer_type(cie)) < 0) {
640 cie = NULL; /* this is not a (valid) CIE */
641 continue;
642 } 812 }
813 ptr = hdr + 4;
814 end = hdr + table->hdrsz;
815 if (tableSize
816 && read_pointer(&ptr, end, hdr[1])
817 == (unsigned long)table->address
818 && (i = read_pointer(&ptr, end, hdr[2])) > 0
819 && i == (end - ptr) / (2 * tableSize)
820 && !((end - ptr) % (2 * tableSize))) {
821 do {
822 const u8 *cur = ptr + (i / 2) * (2 * tableSize);
823
824 startLoc = read_pointer(&cur,
825 cur + tableSize,
826 hdr[3]);
827 if (pc < startLoc)
828 i /= 2;
829 else {
830 ptr = cur - tableSize;
831 i = (i + 1) / 2;
832 }
833 } while (startLoc && i > 1);
834 if (i == 1
835 && (startLoc = read_pointer(&ptr,
836 ptr + tableSize,
837 hdr[3])) != 0
838 && pc >= startLoc)
839 fde = (void *)read_pointer(&ptr,
840 ptr + tableSize,
841 hdr[3]);
842 }
843 }
844
845 if (fde != NULL) {
846 cie = cie_for_fde(fde, table);
643 ptr = (const u8 *)(fde + 2); 847 ptr = (const u8 *)(fde + 2);
644 startLoc = read_pointer(&ptr, 848 if(cie != NULL
645 (const u8 *)(fde + 1) + *fde, 849 && cie != &bad_cie
646 ptrType); 850 && cie != &not_fde
647 endLoc = startLoc 851 && (ptrType = fde_pointer_type(cie)) >= 0
648 + read_pointer(&ptr, 852 && read_pointer(&ptr,
649 (const u8 *)(fde + 1) + *fde, 853 (const u8 *)(fde + 1) + *fde,
650 ptrType & DW_EH_PE_indirect 854 ptrType) == startLoc) {
651 ? ptrType 855 if (!(ptrType & DW_EH_PE_indirect))
652 : ptrType & (DW_EH_PE_FORM|DW_EH_PE_signed)); 856 ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
653 if (pc >= startLoc && pc < endLoc) 857 endLoc = startLoc
654 break; 858 + read_pointer(&ptr,
655 cie = NULL; 859 (const u8 *)(fde + 1) + *fde,
860 ptrType);
861 if(pc >= endLoc)
862 fde = NULL;
863 } else
864 fde = NULL;
865 }
866 if (fde == NULL) {
867 for (fde = table->address, tableSize = table->size;
868 cie = NULL, tableSize > sizeof(*fde)
869 && tableSize - sizeof(*fde) >= *fde;
870 tableSize -= sizeof(*fde) + *fde,
871 fde += 1 + *fde / sizeof(*fde)) {
872 cie = cie_for_fde(fde, table);
873 if (cie == &bad_cie) {
874 cie = NULL;
875 break;
876 }
877 if (cie == NULL
878 || cie == &not_fde
879 || (ptrType = fde_pointer_type(cie)) < 0)
880 continue;
881 ptr = (const u8 *)(fde + 2);
882 startLoc = read_pointer(&ptr,
883 (const u8 *)(fde + 1) + *fde,
884 ptrType);
885 if (!startLoc)
886 continue;
887 if (!(ptrType & DW_EH_PE_indirect))
888 ptrType &= DW_EH_PE_FORM|DW_EH_PE_signed;
889 endLoc = startLoc
890 + read_pointer(&ptr,
891 (const u8 *)(fde + 1) + *fde,
892 ptrType);
893 if (pc >= startLoc && pc < endLoc)
894 break;
895 }
656 } 896 }
657 } 897 }
658 if (cie != NULL) { 898 if (cie != NULL) {
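Note on this hunk: the hdr-based path is a predecessor search. The table is sorted by start address, so the loop halves the candidate range until one entry remains, then checks that pc really falls at or after its start before trusting the FDE; the old linear scan over the whole .eh_frame section is kept only as the fallback. Stripped of the DWARF pointer decoding, the search reduces to a sketch like this (hypothetical helper):

struct hdr_entry { unsigned long start, fde; };

/* Return the entry with the greatest ->start <= pc, or NULL. */
static const struct hdr_entry *lookup(const struct hdr_entry *tbl,
				      unsigned long n, unsigned long pc)
{
	unsigned long lo = 0, hi = n;

	while (lo < hi) {
		unsigned long mid = lo + (hi - lo) / 2;

		if (tbl[mid].start <= pc)
			lo = mid + 1;	/* candidate; a closer one may lie right */
		else
			hi = mid;	/* starts beyond pc */
	}
	return lo ? &tbl[lo - 1] : NULL;
}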
@@ -698,8 +938,11 @@ int unwind(struct unwind_frame_info *frame)
698 else { 938 else {
699 retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end); 939 retAddrReg = state.version <= 1 ? *ptr++ : get_uleb128(&ptr, end);
700 /* skip augmentation */ 940 /* skip augmentation */
701 if (((const char *)(cie + 2))[1] == 'z') 941 if (((const char *)(cie + 2))[1] == 'z') {
702 ptr += get_uleb128(&ptr, end); 942 uleb128_t augSize = get_uleb128(&ptr, end);
943
944 ptr += augSize;
945 }
703 if (ptr > end 946 if (ptr > end
704 || retAddrReg >= ARRAY_SIZE(reg_info) 947 || retAddrReg >= ARRAY_SIZE(reg_info)
705 || REG_INVALID(retAddrReg) 948 || REG_INVALID(retAddrReg)
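Note on this hunk: the rewrite sidesteps an evaluation-order trap. In ptr += get_uleb128(&ptr, end), C leaves unspecified whether ptr is fetched for the += before or after the call advances it, so the skip can be applied to the wrong base; reading the length into a temporary sequences the two explicitly. Distilled with a toy decoder (take_byte()/skip_payload() are made-up names):

static unsigned long take_byte(const u8 **p)
{
	return *(*p)++;			/* advances the caller's cursor */
}

static void skip_payload(const u8 **pp)
{
	/* *pp += take_byte(pp);  -- BAD: whether *pp is read before or
	 * after take_byte() moves it is unspecified, so the result can
	 * be off by the cursor advance. */
	unsigned long n = take_byte(pp);	/* GOOD: order is explicit */
	*pp += n;
}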
@@ -723,9 +966,7 @@ int unwind(struct unwind_frame_info *frame)
723 if (cie == NULL || fde == NULL) { 966 if (cie == NULL || fde == NULL) {
724#ifdef CONFIG_FRAME_POINTER 967#ifdef CONFIG_FRAME_POINTER
725 unsigned long top, bottom; 968 unsigned long top, bottom;
726#endif
727 969
728#ifdef CONFIG_FRAME_POINTER
729 top = STACK_TOP(frame->task); 970 top = STACK_TOP(frame->task);
730 bottom = STACK_BOTTOM(frame->task); 971 bottom = STACK_BOTTOM(frame->task);
731# if FRAME_RETADDR_OFFSET < 0 972# if FRAME_RETADDR_OFFSET < 0
diff --git a/kernel/user.c b/kernel/user.c
index 6408c04242..220e586127 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -187,6 +187,17 @@ void switch_uid(struct user_struct *new_user)
187 atomic_dec(&old_user->processes); 187 atomic_dec(&old_user->processes);
188 switch_uid_keyring(new_user); 188 switch_uid_keyring(new_user);
189 current->user = new_user; 189 current->user = new_user;
190
191 /*
192 * We need to synchronize with __sigqueue_alloc()
193 * doing a get_uid(p->user).. If that saw the old
194 * user value, we need to wait until it has exited
195 * its critical region before we can free the old
196 * structure.
197 */
198 smp_mb();
199 spin_unlock_wait(&current->sighand->siglock);
200
190 free_uid(old_user); 201 free_uid(old_user);
191 suid_keys(current); 202 suid_keys(current);
192} 203}
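Note on this hunk: the added barrier pairing is the publish-then-drain idiom the new comment describes: store the new pointer, issue a full barrier so the store is visible before the lock is sampled, then spin_unlock_wait() until any siglock critical section that could have fetched the old pointer (such as __sigqueue_alloc() doing get_uid(p->user)) has finished, which makes the final free_uid() safe. As a hypothetical wrapper around those lines:

void publish_and_free(struct user_struct *new_user)
{
	struct user_struct *old_user = current->user;

	current->user = new_user;	/* publish the new pointer */
	smp_mb();			/* order the store before the wait */
	spin_unlock_wait(&current->sighand->siglock);
	/* every ->siglock section that began before the barrier, and so
	 * may have seen old_user, has now unlocked */
	free_uid(old_user);
}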
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3df9bfc7ff..17c2f03d2c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -99,7 +99,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
99 * @wq: workqueue to use 99 * @wq: workqueue to use
100 * @work: work to queue 100 * @work: work to queue
101 * 101 *
102 * Returns non-zero if it was successfully added. 102 * Returns 0 if @work was already on a queue, non-zero otherwise.
103 * 103 *
104 * We queue the work to the CPU it was submitted, but there is no 104 * We queue the work to the CPU it was submitted, but there is no
105 * guarantee that it will be processed by that CPU. 105 * guarantee that it will be processed by that CPU.
@@ -138,7 +138,7 @@ static void delayed_work_timer_fn(unsigned long __data)
138 * @work: work to queue 138 * @work: work to queue
139 * @delay: number of jiffies to wait before queueing 139 * @delay: number of jiffies to wait before queueing
140 * 140 *
141 * Returns non-zero if it was successfully added. 141 * Returns 0 if @work was already on a queue, non-zero otherwise.
142 */ 142 */
143int fastcall queue_delayed_work(struct workqueue_struct *wq, 143int fastcall queue_delayed_work(struct workqueue_struct *wq,
144 struct work_struct *work, unsigned long delay) 144 struct work_struct *work, unsigned long delay)
@@ -169,7 +169,7 @@ EXPORT_SYMBOL_GPL(queue_delayed_work);
169 * @work: work to queue 169 * @work: work to queue
170 * @delay: number of jiffies to wait before queueing 170 * @delay: number of jiffies to wait before queueing
171 * 171 *
172 * Returns non-zero if it was successfully added. 172 * Returns 0 if @work was already on a queue, non-zero otherwise.
173 */ 173 */
174int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, 174int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
175 struct work_struct *work, unsigned long delay) 175 struct work_struct *work, unsigned long delay)
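Note on these hunks: the reworded kerneldoc pins down what the return value actually distinguishes: 0 means the work was already pending, non-zero means this call queued it; it is not a general success flag. Caller-side sketch (hypothetical names, using the void-data work API this tree still has):

static void update_stats(void *data);
static DECLARE_WORK(stats_work, update_stats, NULL);

void kick_stats(struct workqueue_struct *wq)
{
	if (!queue_work(wq, &stats_work))
		pr_debug("stats_work already pending; it will still run once\n");
}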