Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c              |   2
-rw-r--r--  kernel/fork.c              |   2
-rw-r--r--  kernel/futex.c             |   5
-rw-r--r--  kernel/irq/manage.c        |   1
-rw-r--r--  kernel/kprobes.c           | 134
-rw-r--r--  kernel/module.c            |   1
-rw-r--r--  kernel/posix-cpu-timers.c  |   6
-rw-r--r--  kernel/power/snapshot.c    |  19
-rw-r--r--  kernel/power/swsusp.c      |  45
-rw-r--r--  kernel/printk.c            |   1
-rw-r--r--  kernel/ptrace.c            |  82
-rw-r--r--  kernel/sched.c             |   5
-rw-r--r--  kernel/softirq.c           |   3
-rw-r--r--  kernel/softlockup.c        |   3
-rw-r--r--  kernel/sys.c               |  26
-rw-r--r--  kernel/sysctl.c            |   5
-rw-r--r--  kernel/workqueue.c         |   2
17 files changed, 216 insertions(+), 126 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 537394b25e8d..452a1d116178 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -28,6 +28,7 @@
 #include <linux/cpuset.h>
 #include <linux/syscalls.h>
 #include <linux/signal.h>
+#include <linux/cn_proc.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -863,6 +864,7 @@ fastcall NORET_TYPE void do_exit(long code)
 	module_put(tsk->binfmt->module);
 
 	tsk->exit_code = code;
+	proc_exit_connector(tsk);
 	exit_notify(tsk);
 #ifdef CONFIG_NUMA
 	mpol_free(tsk->mempolicy);
diff --git a/kernel/fork.c b/kernel/fork.c
index 8a069612eac3..efac2c58ec7d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -42,6 +42,7 @@
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/acct.h>
+#include <linux/cn_proc.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -1143,6 +1144,7 @@ static task_t *copy_process(unsigned long clone_flags,
 		__get_cpu_var(process_counts)++;
 	}
 
+	proc_fork_connector(p);
 	if (!current->signal->tty && p->signal->tty)
 		p->signal->tty = NULL;
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 3b4d5ad44cc6..aca8d10704f6 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -365,6 +365,11 @@ retry:
 	if (bh1 != bh2)
 		spin_unlock(&bh2->lock);
 
+	if (unlikely(op_ret != -EFAULT)) {
+		ret = op_ret;
+		goto out;
+	}
+
 	/* futex_atomic_op_inuser needs to both read and write
 	 * *(int __user *)uaddr2, but we can't modify it
 	 * non-atomically.  Therefore, if get_user below is not
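The early exit added here means only -EFAULT from futex_atomic_op_inuser() falls through to the get_user() fixup-and-retry path; any other error is now returned to the caller instead of being retried forever. A self-contained sketch of that control flow, with hypothetical helper names standing in for the futex internals:

#include <errno.h>

/* Retry only on a fault-style failure; propagate every other error
 * immediately -- the behaviour the hunk above enforces. */
static int op_with_retry(int (*op)(void *), int (*fixup)(void *), void *arg)
{
	for (;;) {
		int ret = op(arg);
		if (ret != -EFAULT)
			return ret;	/* success or a genuine error */
		if (fixup(arg))		/* e.g. fault the page in writably */
			return -EFAULT;	/* fixup failed: give up */
	}
}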
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1cfdb08ddf20..3bd7226d15fa 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -24,6 +24,7 @@ cpumask_t __cacheline_aligned pending_irq_cpumask[NR_IRQS];
 
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+ *	@irq: interrupt number to wait for
  *
  *	This function waits for any pending IRQ handlers for this interrupt
  *	to complete before returning. If you use this function while
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ce4915dd683a..5beda378cc75 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -32,7 +32,6 @@
  * <prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
-#include <linux/spinlock.h>
 #include <linux/hash.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -49,9 +48,9 @@
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
-unsigned int kprobe_cpu = NR_CPUS;
-static DEFINE_SPINLOCK(kprobe_lock);
-static struct kprobe *curr_kprobe;
+static DEFINE_SPINLOCK(kprobe_lock);	/* Protects kprobe_table */
+DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
+static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -153,50 +152,31 @@ void __kprobes free_insn_slot(kprobe_opcode_t *slot)
 	}
 }
 
-/* Locks kprobe: irqs must be disabled */
-void __kprobes lock_kprobes(void)
+/* We have preemption disabled.. so it is safe to use __ versions */
+static inline void set_kprobe_instance(struct kprobe *kp)
 {
-	unsigned long flags = 0;
-
-	/* Avoiding local interrupts to happen right after we take the kprobe_lock
-	 * and before we get a chance to update kprobe_cpu, this to prevent
-	 * deadlock when we have a kprobe on ISR routine and a kprobe on task
-	 * routine
-	 */
-	local_irq_save(flags);
-
-	spin_lock(&kprobe_lock);
-	kprobe_cpu = smp_processor_id();
-
-	local_irq_restore(flags);
+	__get_cpu_var(kprobe_instance) = kp;
 }
 
-void __kprobes unlock_kprobes(void)
+static inline void reset_kprobe_instance(void)
 {
-	unsigned long flags = 0;
-
-	/* Avoiding local interrupts to happen right after we update
-	 * kprobe_cpu and before we get a a chance to release kprobe_lock,
-	 * this to prevent deadlock when we have a kprobe on ISR routine and
-	 * a kprobe on task routine
-	 */
-	local_irq_save(flags);
-
-	kprobe_cpu = NR_CPUS;
-	spin_unlock(&kprobe_lock);
-
-	local_irq_restore(flags);
+	__get_cpu_var(kprobe_instance) = NULL;
 }
 
-/* You have to be holding the kprobe_lock */
+/*
+ * This routine is called either:
+ *	- under the kprobe_lock spinlock - during kprobe_[un]register()
+ *				OR
+ *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
+ */
 struct kprobe __kprobes *get_kprobe(void *addr)
 {
 	struct hlist_head *head;
 	struct hlist_node *node;
+	struct kprobe *p;
 
 	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
-	hlist_for_each(node, head) {
-		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
+	hlist_for_each_entry_rcu(p, node, head, hlist) {
 		if (p->addr == addr)
 			return p;
 	}
@@ -211,13 +191,13 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
-	list_for_each_entry(kp, &p->list, list) {
+	list_for_each_entry_rcu(kp, &p->list, list) {
 		if (kp->pre_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			if (kp->pre_handler(kp, regs))
 				return 1;
 		}
-		curr_kprobe = NULL;
+		reset_kprobe_instance();
 	}
 	return 0;
 }
@@ -227,11 +207,11 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 {
 	struct kprobe *kp;
 
-	list_for_each_entry(kp, &p->list, list) {
+	list_for_each_entry_rcu(kp, &p->list, list) {
 		if (kp->post_handler) {
-			curr_kprobe = kp;
+			set_kprobe_instance(kp);
 			kp->post_handler(kp, regs, flags);
-			curr_kprobe = NULL;
+			reset_kprobe_instance();
 		}
 	}
 	return;
@@ -240,12 +220,14 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 					int trapnr)
 {
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+
 	/*
 	 * if we faulted "during" the execution of a user specified
 	 * probe handler, invoke just that probe's fault handler
 	 */
-	if (curr_kprobe && curr_kprobe->fault_handler) {
-		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
+	if (cur && cur->fault_handler) {
+		if (cur->fault_handler(cur, regs, trapnr))
 			return 1;
 	}
 	return 0;
@@ -253,17 +235,18 @@ static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
 
 static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
-	struct kprobe *kp = curr_kprobe;
-	if (curr_kprobe && kp->break_handler) {
-		if (kp->break_handler(kp, regs)) {
-			curr_kprobe = NULL;
-			return 1;
-		}
+	struct kprobe *cur = __get_cpu_var(kprobe_instance);
+	int ret = 0;
+
+	if (cur && cur->break_handler) {
+		if (cur->break_handler(cur, regs))
+			ret = 1;
 	}
-	curr_kprobe = NULL;
-	return 0;
+	reset_kprobe_instance();
+	return ret;
 }
 
+/* Called with kretprobe_lock held */
 struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 {
 	struct hlist_node *node;
@@ -273,6 +256,7 @@ struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
 		return NULL;
 }
 
+/* Called with kretprobe_lock held */
 static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
 							      *rp)
 {
@@ -283,6 +267,7 @@ static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
 		return NULL;
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 {
 	/*
@@ -301,6 +286,7 @@ void __kprobes add_rp_inst(struct kretprobe_instance *ri)
 	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
 }
 
+/* Called with kretprobe_lock held */
 void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
 {
 	/* remove rp inst off the rprobe_inst_table */
@@ -334,13 +320,13 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
 	struct hlist_node *node, *tmp;
 	unsigned long flags = 0;
 
-	spin_lock_irqsave(&kprobe_lock, flags);
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	head = kretprobe_inst_table_head(current);
 	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
 		if (ri->task == tk)
 			recycle_rp_inst(ri);
 	}
-	spin_unlock_irqrestore(&kprobe_lock, flags);
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 /*
@@ -351,9 +337,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
 					   struct pt_regs *regs)
 {
 	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+	unsigned long flags = 0;
 
 	/*TODO: consider to only swap the RA after the last pre_handler fired */
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	arch_prepare_kretprobe(rp, regs);
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	return 0;
 }
 
@@ -384,13 +373,13 @@ static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
 	struct kprobe *kp;
 
 	if (p->break_handler) {
-		list_for_each_entry(kp, &old_p->list, list) {
+		list_for_each_entry_rcu(kp, &old_p->list, list) {
 			if (kp->break_handler)
 				return -EEXIST;
 		}
-		list_add_tail(&p->list, &old_p->list);
+		list_add_tail_rcu(&p->list, &old_p->list);
 	} else
-		list_add(&p->list, &old_p->list);
+		list_add_rcu(&p->list, &old_p->list);
 	return 0;
 }
 
@@ -408,18 +397,18 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 	ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
-	list_add(&p->list, &ap->list);
+	list_add_rcu(&p->list, &ap->list);
 
 	INIT_HLIST_NODE(&ap->hlist);
-	hlist_del(&p->hlist);
-	hlist_add_head(&ap->hlist,
+	hlist_del_rcu(&p->hlist);
+	hlist_add_head_rcu(&ap->hlist,
 		       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
 }
 
 /*
  * This is the second or subsequent kprobe at the address - handle
  * the intricacies
- * TODO: Move kcalloc outside the spinlock
+ * TODO: Move kcalloc outside the spin_lock
  */
 static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 					  struct kprobe *p)
@@ -445,7 +434,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 {
 	arch_disarm_kprobe(p);
-	hlist_del(&p->hlist);
+	hlist_del_rcu(&p->hlist);
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 	arch_remove_kprobe(p);
 }
@@ -453,11 +442,10 @@ static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
 		struct kprobe *p, unsigned long flags)
 {
-	list_del(&p->list);
-	if (list_empty(&old_p->list)) {
+	list_del_rcu(&p->list);
+	if (list_empty(&old_p->list))
 		cleanup_kprobe(old_p, flags);
-		kfree(old_p);
-	} else
+	else
 		spin_unlock_irqrestore(&kprobe_lock, flags);
 }
 
@@ -480,9 +468,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 	if ((ret = arch_prepare_kprobe(p)) != 0)
 		goto rm_kprobe;
 
+	p->nmissed = 0;
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
-	p->nmissed = 0;
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
@@ -490,7 +478,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 
 	arch_copy_kprobe(p);
 	INIT_HLIST_NODE(&p->hlist);
-	hlist_add_head(&p->hlist,
+	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
 	arch_arm_kprobe(p);
@@ -511,10 +499,16 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
+		/* cleanup_*_kprobe() does the spin_unlock_irqrestore */
 		if (old_p->pre_handler == aggr_pre_handler)
 			cleanup_aggr_kprobe(old_p, p, flags);
 		else
 			cleanup_kprobe(p, flags);
+
+		synchronize_sched();
+		if (old_p->pre_handler == aggr_pre_handler &&
+				list_empty(&old_p->list))
+			kfree(old_p);
 	} else
 		spin_unlock_irqrestore(&kprobe_lock, flags);
 }
@@ -591,13 +585,13 @@ void __kprobes unregister_kretprobe(struct kretprobe *rp)
 
 	unregister_kprobe(&rp->kp);
 	/* No race here */
-	spin_lock_irqsave(&kprobe_lock, flags);
+	spin_lock_irqsave(&kretprobe_lock, flags);
 	free_rp_inst(rp);
 	while ((ri = get_used_rp_inst(rp)) != NULL) {
 		ri->rp = NULL;
 		hlist_del(&ri->uflist);
 	}
-	spin_unlock_irqrestore(&kprobe_lock, flags);
+	spin_unlock_irqrestore(&kretprobe_lock, flags);
 }
 
 static int __init init_kprobes(void)
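Taken together, the kprobes hunks replace the old global kprobe_cpu/lock scheme with RCU-protected lists: the trap-time readers walk kprobe_table with hlist_for_each_entry_rcu() under disabled preemption, writers keep taking kprobe_lock but switch to the _rcu list operations, and unregister_kprobe() now waits in synchronize_sched() before kfree()ing an empty aggregate probe. A minimal self-contained sketch of that reader/updater split, with illustrative names (my_node, my_list, my_lock are not kernel APIs; the list/RCU primitives and the era's 4-argument hlist_for_each_entry_rcu are):

#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_node {
	int key;
	struct hlist_node hlist;
};

static HLIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_lock);

/* Reader: runs with preemption disabled (as the kprobe trap path does),
 * so synchronize_sched() on the update side is a sufficient grace period. */
static struct my_node *lookup(int key)
{
	struct my_node *n;
	struct hlist_node *pos;

	hlist_for_each_entry_rcu(n, pos, &my_list, hlist)
		if (n->key == key)
			return n;
	return NULL;
}

/* Updater: unpublish under the spinlock, free only after a grace period. */
static void remove(struct my_node *n)
{
	spin_lock(&my_lock);
	hlist_del_rcu(&n->hlist);
	spin_unlock(&my_lock);
	synchronize_sched();	/* wait out preempt-disabled readers */
	kfree(n);
}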
diff --git a/kernel/module.c b/kernel/module.c
index ff5c500ab625..2ea929d51ad0 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -37,6 +37,7 @@
 #include <linux/stop_machine.h>
 #include <linux/device.h>
 #include <linux/string.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
 #include <asm/cacheflush.h>
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 91a894264941..84af54c39e1b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -497,7 +497,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
 				   nthreads);
 		do {
-			if (!unlikely(t->flags & PF_EXITING)) {
+			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(prof_ticks(t), left);
 				if (cputime_eq(t->it_prof_expires,
 					       cputime_zero) ||
@@ -512,7 +512,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
 				   nthreads);
 		do {
-			if (!unlikely(t->flags & PF_EXITING)) {
+			if (likely(!(t->flags & PF_EXITING))) {
 				ticks = cputime_add(virt_ticks(t), left);
 				if (cputime_eq(t->it_virt_expires,
 					       cputime_zero) ||
@@ -527,7 +527,7 @@ static void process_timer_rebalance(struct task_struct *p,
 		nsleft = expires.sched - val.sched;
 		do_div(nsleft, nthreads);
 		do {
-			if (!unlikely(t->flags & PF_EXITING)) {
+			if (likely(!(t->flags & PF_EXITING))) {
 				ns = t->sched_time + nsleft;
 				if (t->it_sched_expires == 0 ||
 				    t->it_sched_expires > ns) {
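The three posix-cpu-timers hunks are purely about hint placement: unlikely(x) still evaluates to the truth value of x, so !unlikely(x) computes the right condition but attaches the compiler's branch-prediction hint to the wrong expression; likely(!(x)) hints the condition the branch actually tests. A compilable illustration using the kernel's own macro definitions:

/* The kernel's branch-hint macros, as in <linux/compiler.h>: */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)
#define PF_EXITING	0x00000004	/* value from <linux/sched.h> */

static int exiting_old(unsigned long flags)
{
	/* Correct result, but the hint claims the inner test is rare,
	 * and the outer negation then inverts what the hint meant. */
	return !unlikely(flags & PF_EXITING);
}

static int exiting_new(unsigned long flags)
{
	/* The hint now covers the condition the branch actually takes. */
	return likely(!(flags & PF_EXITING));
}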
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 42a628704398..723f5179883e 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -168,9 +168,8 @@ static unsigned count_data_pages(void)
 {
 	struct zone *zone;
 	unsigned long zone_pfn;
-	unsigned n;
+	unsigned int n = 0;
 
-	n = 0;
 	for_each_zone (zone) {
 		if (is_highmem(zone))
 			continue;
@@ -250,10 +249,10 @@ static inline void fill_pb_page(struct pbe *pbpage)
  *	of memory pages allocated with alloc_pagedir()
  */
 
-void create_pbe_list(struct pbe *pblist, unsigned nr_pages)
+void create_pbe_list(struct pbe *pblist, unsigned int nr_pages)
 {
 	struct pbe *pbpage, *p;
-	unsigned num = PBES_PER_PAGE;
+	unsigned int num = PBES_PER_PAGE;
 
 	for_each_pb_page (pbpage, pblist) {
 		if (num >= nr_pages)
@@ -293,9 +292,9 @@ static void *alloc_image_page(void)
  *	On each page we set up a list of struct_pbe elements.
  */
 
-struct pbe *alloc_pagedir(unsigned nr_pages)
+struct pbe *alloc_pagedir(unsigned int nr_pages)
 {
-	unsigned num;
+	unsigned int num;
 	struct pbe *pblist, *pbe;
 
 	if (!nr_pages)
@@ -329,7 +328,7 @@ void swsusp_free(void)
 	for_each_zone(zone) {
 		for (zone_pfn = 0; zone_pfn < zone->spanned_pages; ++zone_pfn)
 			if (pfn_valid(zone_pfn + zone->zone_start_pfn)) {
-				struct page * page;
+				struct page *page;
 				page = pfn_to_page(zone_pfn + zone->zone_start_pfn);
 				if (PageNosave(page) && PageNosaveFree(page)) {
 					ClearPageNosave(page);
@@ -348,7 +347,7 @@ void swsusp_free(void)
  *	free pages.
  */
 
-static int enough_free_mem(unsigned nr_pages)
+static int enough_free_mem(unsigned int nr_pages)
 {
 	pr_debug("swsusp: available memory: %u pages\n", nr_free_pages());
 	return nr_free_pages() > (nr_pages + PAGES_FOR_IO +
@@ -356,7 +355,7 @@ static int enough_free_mem(unsigned nr_pages)
 }
 
 
-static struct pbe *swsusp_alloc(unsigned nr_pages)
+static struct pbe *swsusp_alloc(unsigned int nr_pages)
 {
 	struct pbe *pblist, *p;
 
@@ -380,7 +379,7 @@ static struct pbe *swsusp_alloc(unsigned nr_pages)
 
 asmlinkage int swsusp_save(void)
 {
-	unsigned nr_pages;
+	unsigned int nr_pages;
 
 	pr_debug("swsusp: critical section: \n");
 	if (save_highmem()) {
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 12db1d2ad61f..e1ab28b9b217 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -85,18 +85,11 @@ unsigned int nr_copy_pages __nosavedata = 0;
 /* Suspend pagedir is allocated before final copy, therefore it
    must be freed after resume
 
-   Warning: this is evil. There are actually two pagedirs at time of
-   resume. One is "pagedir_save", which is empty frame allocated at
-   time of suspend, that must be freed. Second is "pagedir_nosave",
-   allocated at time of resume, that travels through memory not to
-   collide with anything.
-
    Warning: this is even more evil than it seems. Pagedirs this file
    talks about are completely different from page directories used by
    MMU hardware.
  */
 suspend_pagedir_t *pagedir_nosave __nosavedata = NULL;
-suspend_pagedir_t *pagedir_save;
 
 #define SWSUSP_SIG	"S1SUSPEND"
 
102 95
 static unsigned short swapfile_used[MAX_SWAPFILES];
 static unsigned short root_swap;
 
-static int write_page(unsigned long addr, swp_entry_t * loc);
-static int bio_read_page(pgoff_t page_off, void * page);
+static int write_page(unsigned long addr, swp_entry_t *loc);
+static int bio_read_page(pgoff_t page_off, void *page);
 
 static u8 key_iv[MAXKEY+MAXIV];
 
@@ -355,7 +348,7 @@ static void lock_swapdevices(void)
  * This is a partial improvement, since we will at least return other
  * errors, though we need to eventually fix the damn code.
  */
-static int write_page(unsigned long addr, swp_entry_t * loc)
+static int write_page(unsigned long addr, swp_entry_t *loc)
 {
 	swp_entry_t entry;
 	int error = 0;
@@ -383,9 +376,9 @@ static int write_page(unsigned long addr, swp_entry_t * loc)
 static void data_free(void)
 {
 	swp_entry_t entry;
-	struct pbe * p;
+	struct pbe *p;
 
-	for_each_pbe(p, pagedir_nosave) {
+	for_each_pbe (p, pagedir_nosave) {
 		entry = p->swap_address;
 		if (entry.val)
 			swap_free(entry);
@@ -492,8 +485,8 @@ static void free_pagedir_entries(void)
 static int write_pagedir(void)
 {
 	int error = 0;
-	unsigned n = 0;
-	struct pbe * pbe;
+	unsigned int n = 0;
+	struct pbe *pbe;
 
 	printk( "Writing pagedir...");
 	for_each_pb_page (pbe, pagedir_nosave) {
@@ -543,7 +536,7 @@ static int write_suspend_image(void)
  *	We should only consider resume_device.
  */
 
-int enough_swap(unsigned nr_pages)
+int enough_swap(unsigned int nr_pages)
 {
 	struct sysinfo i;
 
@@ -694,7 +687,7 @@ static int check_pagedir(struct pbe *pblist)
  *	restore from the loaded pages later. We relocate them here.
  */
 
-static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
+static struct pbe *swsusp_pagedir_relocate(struct pbe *pblist)
 {
 	struct zone *zone;
 	unsigned long zone_pfn;
@@ -770,7 +763,7 @@ static struct pbe * swsusp_pagedir_relocate(struct pbe *pblist)
 
 static atomic_t io_done = ATOMIC_INIT(0);
 
-static int end_io(struct bio * bio, unsigned int num, int err)
+static int end_io(struct bio *bio, unsigned int num, int err)
 {
 	if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
 		panic("I/O error reading memory image");
@@ -778,7 +771,7 @@ static int end_io(struct bio * bio, unsigned int num, int err)
 	return 0;
 }
 
-static struct block_device * resume_bdev;
+static struct block_device *resume_bdev;
 
 /**
  *	submit - submit BIO request.
@@ -791,10 +784,10 @@ static struct block_device * resume_bdev;
  *	Then submit it and wait.
  */
 
-static int submit(int rw, pgoff_t page_off, void * page)
+static int submit(int rw, pgoff_t page_off, void *page)
 {
 	int error = 0;
-	struct bio * bio;
+	struct bio *bio;
 
 	bio = bio_alloc(GFP_ATOMIC, 1);
 	if (!bio)
@@ -823,12 +816,12 @@ static int submit(int rw, pgoff_t page_off, void * page)
 	return error;
 }
 
-static int bio_read_page(pgoff_t page_off, void * page)
+static int bio_read_page(pgoff_t page_off, void *page)
 {
 	return submit(READ, page_off, page);
 }
 
-static int bio_write_page(pgoff_t page_off, void * page)
+static int bio_write_page(pgoff_t page_off, void *page)
 {
 	return submit(WRITE, page_off, page);
 }
@@ -838,7 +831,7 @@ static int bio_write_page(pgoff_t page_off, void * page)
  *	I really don't think that it's foolproof but more than nothing..
  */
 
-static const char * sanity_check(void)
+static const char *sanity_check(void)
 {
 	dump_info();
 	if (swsusp_info.version_code != LINUX_VERSION_CODE)
@@ -864,7 +857,7 @@ static const char * sanity_check(void)
 
 static int check_header(void)
 {
-	const char * reason = NULL;
+	const char *reason = NULL;
 	int error;
 
 	if ((error = bio_read_page(swp_offset(swsusp_header.swsusp_info), &swsusp_info)))
@@ -912,7 +905,7 @@ static int check_sig(void)
 
 static int data_read(struct pbe *pblist)
 {
-	struct pbe * p;
+	struct pbe *p;
 	int error = 0;
 	int i = 0;
 	int mod = swsusp_info.image_pages / 100;
@@ -950,7 +943,7 @@ static int data_read(struct pbe *pblist)
 static int read_pagedir(struct pbe *pblist)
 {
 	struct pbe *pbpage, *p;
-	unsigned i = 0;
+	unsigned int i = 0;
 	int error;
 
 	if (!pblist)
diff --git a/kernel/printk.c b/kernel/printk.c
index 3cb9708209bc..e9be027bc930 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -806,7 +806,6 @@ void console_unblank(void)
806 c->unblank(); 806 c->unblank();
807 release_console_sem(); 807 release_console_sem();
808} 808}
809EXPORT_SYMBOL(console_unblank);
810 809
811/* 810/*
812 * Return the console tty driver structure and its associated index 811 * Return the console tty driver structure and its associated index
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 863eee8bff47..5b8dd98a230e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -406,3 +406,85 @@ int ptrace_request(struct task_struct *child, long request,
 
 	return ret;
 }
+
+#ifndef __ARCH_SYS_PTRACE
+static int ptrace_get_task_struct(long request, long pid,
+		struct task_struct **childp)
+{
+	struct task_struct *child;
+	int ret;
+
+	/*
+	 * Callers use child == NULL as an indication to exit early even
+	 * when the return value is 0, so make sure it is non-NULL here.
+	 */
+	*childp = NULL;
+
+	if (request == PTRACE_TRACEME) {
+		/*
+		 * Are we already being traced?
+		 */
+		if (current->ptrace & PT_PTRACED)
+			return -EPERM;
+		ret = security_ptrace(current->parent, current);
+		if (ret)
+			return -EPERM;
+		/*
+		 * Set the ptrace bit in the process ptrace flags.
+		 */
+		current->ptrace |= PT_PTRACED;
+		return 0;
+	}
+
+	/*
+	 * You may not mess with init
+	 */
+	if (pid == 1)
+		return -EPERM;
+
+	ret = -ESRCH;
+	read_lock(&tasklist_lock);
+	child = find_task_by_pid(pid);
+	if (child)
+		get_task_struct(child);
+	read_unlock(&tasklist_lock);
+	if (!child)
+		return -ESRCH;
+
+	*childp = child;
+	return 0;
+}
+
+asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+{
+	struct task_struct *child;
+	long ret;
+
+	/*
+	 * This lock_kernel fixes a subtle race with suid exec
+	 */
+	lock_kernel();
+	ret = ptrace_get_task_struct(request, pid, &child);
+	if (!child)
+		goto out;
+
+	if (request == PTRACE_ATTACH) {
+		ret = ptrace_attach(child);
+		goto out;
+	}
+
+	ret = ptrace_check_attach(child, request == PTRACE_KILL);
+	if (ret < 0)
+		goto out_put_task_struct;
+
+	ret = arch_ptrace(child, request, addr, data);
+	if (ret < 0)
+		goto out_put_task_struct;
+
+ out_put_task_struct:
+	put_task_struct(child);
+ out:
+	unlock_kernel();
+	return ret;
+}
+#endif /* __ARCH_SYS_PTRACE */
diff --git a/kernel/sched.c b/kernel/sched.c
index b4f4eb613537..3ce26954be12 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3563,8 +3563,6 @@ int idle_cpu(int cpu)
 	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
 }
 
-EXPORT_SYMBOL_GPL(idle_cpu);
-
 /**
  * idle_task - return the idle task for a given cpu.
  * @cpu: the processor in question.
@@ -4680,7 +4678,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 		/* Unbind it from offline cpu so it can run. Fall thru. */
-		kthread_bind(cpu_rq(cpu)->migration_thread,smp_processor_id());
+		kthread_bind(cpu_rq(cpu)->migration_thread,
+			     any_online_cpu(cpu_online_map));
 		kthread_stop(cpu_rq(cpu)->migration_thread);
 		cpu_rq(cpu)->migration_thread = NULL;
 		break;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f766b2fc48be..ad3295cdded5 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,7 +470,8 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 		/* Unbind so it can run.  Fall thru. */
-		kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
+		kthread_bind(per_cpu(ksoftirqd, hotcpu),
+			     any_online_cpu(cpu_online_map));
 	case CPU_DEAD:
 		p = per_cpu(ksoftirqd, hotcpu);
 		per_cpu(ksoftirqd, hotcpu) = NULL;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 75976209cea7..a2dcceb9437d 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -123,7 +123,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
 		/* Unbind so it can run.  Fall thru. */
-		kthread_bind(per_cpu(watchdog_task, hotcpu), smp_processor_id());
+		kthread_bind(per_cpu(watchdog_task, hotcpu),
+			     any_online_cpu(cpu_online_map));
 	case CPU_DEAD:
 		p = per_cpu(watchdog_task, hotcpu);
 		per_cpu(watchdog_task, hotcpu) = NULL;
diff --git a/kernel/sys.c b/kernel/sys.c
index 2fa1ed18123c..c43b3e22bbda 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -28,6 +28,7 @@
 #include <linux/suspend.h>
 #include <linux/tty.h>
 #include <linux/signal.h>
+#include <linux/cn_proc.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -375,18 +376,21 @@ void emergency_restart(void)
 }
 EXPORT_SYMBOL_GPL(emergency_restart);
 
-/**
- *	kernel_restart - reboot the system
- *
- *	Shutdown everything and perform a clean reboot.
- *	This is not safe to call in interrupt context.
- */
 void kernel_restart_prepare(char *cmd)
 {
 	notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
 	system_state = SYSTEM_RESTART;
 	device_shutdown();
 }
+
+/**
+ *	kernel_restart - reboot the system
+ *	@cmd: pointer to buffer containing command to execute for restart
+ *		or %NULL
+ *
+ *	Shutdown everything and perform a clean reboot.
+ *	This is not safe to call in interrupt context.
+ */
 void kernel_restart(char *cmd)
 {
 	kernel_restart_prepare(cmd);
@@ -623,6 +627,7 @@ asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
 	current->egid = new_egid;
 	current->gid = new_rgid;
 	key_fsgid_changed(current);
+	proc_id_connector(current, PROC_EVENT_GID);
 	return 0;
 }
 
@@ -662,6 +667,7 @@ asmlinkage long sys_setgid(gid_t gid)
 		return -EPERM;
 
 	key_fsgid_changed(current);
+	proc_id_connector(current, PROC_EVENT_GID);
 	return 0;
 }
 
@@ -751,6 +757,7 @@ asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
 		current->fsuid = current->euid;
 
 	key_fsuid_changed(current);
+	proc_id_connector(current, PROC_EVENT_UID);
 
 	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
 }
@@ -798,6 +805,7 @@ asmlinkage long sys_setuid(uid_t uid)
 	current->suid = new_suid;
 
 	key_fsuid_changed(current);
+	proc_id_connector(current, PROC_EVENT_UID);
 
 	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
 }
@@ -846,6 +854,7 @@ asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
 		current->suid = suid;
 
 	key_fsuid_changed(current);
+	proc_id_connector(current, PROC_EVENT_UID);
 
 	return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
 }
@@ -898,6 +907,7 @@ asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
 		current->sgid = sgid;
 
 	key_fsgid_changed(current);
+	proc_id_connector(current, PROC_EVENT_GID);
 	return 0;
 }
 
@@ -940,6 +950,7 @@ asmlinkage long sys_setfsuid(uid_t uid)
 	}
 
 	key_fsuid_changed(current);
+	proc_id_connector(current, PROC_EVENT_UID);
 
 	security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
 
@@ -968,6 +979,7 @@ asmlinkage long sys_setfsgid(gid_t gid)
 		}
 		current->fsgid = gid;
 		key_fsgid_changed(current);
+		proc_id_connector(current, PROC_EVENT_GID);
 	}
 	return old_fsgid;
 }
@@ -1485,8 +1497,6 @@ EXPORT_SYMBOL(in_egroup_p);
 
 DECLARE_RWSEM(uts_sem);
 
-EXPORT_SYMBOL(uts_sem);
-
 asmlinkage long sys_newuname(struct new_utsname __user * name)
 {
 	int errno = 0;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8e56e2495542..c4f35f96884d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -952,7 +952,7 @@ static ctl_table fs_table[] = {
 		.data		= &aio_nr,
 		.maxlen		= sizeof(aio_nr),
 		.mode		= 0444,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= &proc_doulongvec_minmax,
 	},
 	{
 		.ctl_name	= FS_AIO_MAX_NR,
@@ -960,7 +960,7 @@ static ctl_table fs_table[] = {
 		.data		= &aio_max_nr,
 		.maxlen		= sizeof(aio_max_nr),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec,
+		.proc_handler	= &proc_doulongvec_minmax,
 	},
 #ifdef CONFIG_INOTIFY
 	{
@@ -1997,6 +1997,7 @@ int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
  * @filp: the file structure
  * @buffer: the user buffer
  * @lenp: the size of the user buffer
+ * @ppos: pointer to the file position
  *
  * Reads/writes up to table->maxlen/sizeof(unsigned int) integer
  * values from/to the user buffer, treated as an ASCII string.
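The handler swap in the two aio entries matters because a sysctl handler must match the width declared by .data/.maxlen: aio_nr and aio_max_nr are unsigned long, so proc_dointvec read and wrote only an int's worth of them on 64-bit; proc_doulongvec_minmax is the long-sized handler. A sketch of the corrected table entry (assumed kernel context, aio_max_nr lives in fs/aio.c):

#include <linux/sysctl.h>

extern unsigned long aio_max_nr;	/* defined in fs/aio.c */

static ctl_table aio_table_sketch[] = {
	{
		.ctl_name	= FS_AIO_MAX_NR,
		.procname	= "aio-max-nr",
		.data		= &aio_max_nr,		/* unsigned long */
		.maxlen		= sizeof(aio_max_nr),	/* 8 bytes on 64-bit */
		.mode		= 0644,
		.proc_handler	= &proc_doulongvec_minmax,
	},
	{ .ctl_name = 0 }
};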
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7cee222231bc..42df83d7fad2 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -524,7 +524,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		list_for_each_entry(wq, &workqueues, list) {
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
-				     smp_processor_id());
+				     any_online_cpu(cpu_online_map));
 			cleanup_workqueue_thread(wq, hotcpu);
 		}
 		break;
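The four kthread_bind() hunks (sched.c, softirq.c, softlockup.c, workqueue.c) fix the same bug: on CPU_UP_CANCELED the per-cpu kthread was bound to smp_processor_id(), i.e. whatever CPU the notifier happened to run on, rather than a CPU guaranteed to be online. Binding to any_online_cpu(cpu_online_map) ensures the thread can actually be scheduled so kthread_stop() can reap it. The shared shape, as a hedged sketch (reap_percpu_thread is an illustrative helper, not a kernel API):

#include <linux/kthread.h>
#include <linux/cpumask.h>

/* Tear down a per-cpu kthread whose home CPU never came up: it must
 * first be bound to a certainly-online CPU, or kthread_stop() could
 * wait on a thread that can never run. */
static void reap_percpu_thread(struct task_struct *p)
{
	kthread_bind(p, any_online_cpu(cpu_online_map));
	kthread_stop(p);
}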