author     Dmitry Torokhov <dtor_core@ameritech.net>  2006-04-29 01:11:23 -0400
committer  Dmitry Torokhov <dtor_core@ameritech.net>  2006-04-29 01:11:23 -0400
commit     7b7e394185014e0f3bd8989cac937003f20ef9ce (patch)
tree       3beda5f979bba0aa9822534e239cf1b45f3be69c /kernel
parent     ddc5d3414593e4d7ad7fbd33e7f7517fcc234544 (diff)
parent     693f7d362055261882659475d2ef022e32edbff1 (diff)
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c             6
-rw-r--r--  kernel/fork.c             9
-rw-r--r--  kernel/hrtimer.c          5
-rw-r--r--  kernel/irq/Makefile       3
-rw-r--r--  kernel/irq/manage.c       6
-rw-r--r--  kernel/irq/migration.c    5
-rw-r--r--  kernel/kprobes.c          3
-rw-r--r--  kernel/panic.c            1
-rw-r--r--  kernel/power/Kconfig      2
-rw-r--r--  kernel/power/main.c       2
-rw-r--r--  kernel/power/pm.c        20
-rw-r--r--  kernel/power/snapshot.c   9
-rw-r--r--  kernel/printk.c           6
-rw-r--r--  kernel/profile.c          2
-rw-r--r--  kernel/ptrace.c          10
-rw-r--r--  kernel/rcupdate.c         4
-rw-r--r--  kernel/sched.c           64
-rw-r--r--  kernel/signal.c          11
-rw-r--r--  kernel/softirq.c          4
-rw-r--r--  kernel/softlockup.c       4
-rw-r--r--  kernel/sys_ni.c          12
-rw-r--r--  kernel/time.c             8
-rw-r--r--  kernel/timer.c           38
-rw-r--r--  kernel/uid16.c           59
-rw-r--r--  kernel/workqueue.c        2
25 files changed, 174 insertions, 121 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 6c2eeb8f6390..f86434d7b3d1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/futex.h>
 #include <linux/compat.h>
+#include <linux/pipe_fs_i.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -55,7 +56,7 @@ static void __unhash_process(struct task_struct *p)
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
 
-		list_del_init(&p->tasks);
+		list_del_rcu(&p->tasks);
 		__get_cpu_var(process_counts)--;
 	}
 	list_del_rcu(&p->thread_group);
@@ -941,6 +942,9 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (tsk->io_context)
 		exit_io_context();
 
+	if (tsk->splice_pipe)
+		__free_pipe_info(tsk->splice_pipe);
+
 	/* PF_DEAD causes final put_task_struct after we schedule. */
 	preempt_disable();
 	BUG_ON(tsk->flags & PF_DEAD);
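Note on the exit.c hunk: tsk->splice_pipe is the per-task pipe-buffer cache that fs/splice.c keeps so repeated splice() calls do not reallocate a struct pipe_inode_info each time; do_exit() now releases it when the task dies. A minimal sketch of the cache-and-free pattern, assuming the alloc_pipe_info()/__free_pipe_info() helpers of this era (their exact signatures are an assumption here):

	/* Sketch only: lazily cache one pipe_inode_info per task for splice. */
	static struct pipe_inode_info *task_splice_pipe(struct task_struct *tsk)
	{
		if (!tsk->splice_pipe)
			tsk->splice_pipe = alloc_pipe_info(NULL);	/* may be NULL */
		return tsk->splice_pipe;
	}

	/* ...and at task exit, exactly as the hunk above adds to do_exit(): */
	static void task_free_splice_pipe(struct task_struct *tsk)
	{
		if (tsk->splice_pipe)
			__free_pipe_info(tsk->splice_pipe);
	}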
diff --git a/kernel/fork.c b/kernel/fork.c
index 3384eb89cb1c..d2fa57d480d4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -124,12 +124,6 @@ void __put_task_struct(struct task_struct *tsk)
 	free_task(tsk);
 }
 
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
-	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
-	__put_task_struct(tsk);
-}
-
 void __init fork_init(unsigned long mempages)
 {
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -186,6 +180,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	atomic_set(&tsk->usage,2);
 	atomic_set(&tsk->fs_excl, 0);
 	tsk->btrace_seq = 0;
+	tsk->splice_pipe = NULL;
 	return tsk;
 }
 
@@ -1210,7 +1205,7 @@ static task_t *copy_process(unsigned long clone_flags,
 		attach_pid(p, PIDTYPE_PGID, process_group(p));
 		attach_pid(p, PIDTYPE_SID, p->signal->session);
 
-		list_add_tail(&p->tasks, &init_task.tasks);
+		list_add_tail_rcu(&p->tasks, &init_task.tasks);
 		__get_cpu_var(process_counts)++;
 	}
 	attach_pid(p, PIDTYPE_PID, p->pid);
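Note on the fork.c/exit.c pair: switching &p->tasks over to list_add_tail_rcu()/list_del_rcu() turns the global task list into an RCU-protected list, so readers may walk it without taking the tasklist lock. A minimal reader sketch using the standard RCU list API (the traversal itself is illustrative; only the init_task.tasks anchoring comes from the diff):

	#include <linux/rcupdate.h>
	#include <linux/sched.h>

	/* Count tasks locklessly; safe because writers use the _rcu list ops. */
	static int count_tasks(void)
	{
		struct task_struct *p;
		int n = 0;

		rcu_read_lock();
		list_for_each_entry_rcu(p, &init_task.tasks, tasks)
			n++;
		rcu_read_unlock();
		return n;
	}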
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f181ff4dd32e..b7f0388bd71c 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,7 @@ int hrtimer_cancel(struct hrtimer *timer)
 
 		if (ret >= 0)
 			return ret;
+		cpu_relax();
 	}
 }
 
@@ -835,7 +836,7 @@ static void migrate_hrtimers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
+static int hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -859,7 +860,7 @@ static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata hrtimers_nb = {
+static struct notifier_block hrtimers_nb = {
 	.notifier_call = hrtimer_cpu_notify,
 };
 
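Note on the cpu_relax() addition: it sits in the retry loop of hrtimer_cancel(), where hrtimer_try_to_cancel() returns -1 as long as the timer's callback is running on another CPU. The loop as it reads after this hunk (reconstructed from the context above):

	int hrtimer_cancel(struct hrtimer *timer)
	{
		for (;;) {
			int ret = hrtimer_try_to_cancel(timer);

			if (ret >= 0)
				return ret;	/* 0: was inactive, 1: cancelled */
			cpu_relax();		/* callback running; be polite while spinning */
		}
	}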
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 2b33f852be3e..9f77f50d8143 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,4 +1,5 @@
 
-obj-y := handle.o manage.o spurious.o migration.o
+obj-y := handle.o manage.o spurious.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index ac766ad573e8..1279e3499534 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -246,8 +246,10 @@ int setup_irq(unsigned int irq, struct irqaction * new)
 
 mismatch:
 	spin_unlock_irqrestore(&desc->lock, flags);
-	printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
-	dump_stack();
+	if (!(new->flags & SA_PROBEIRQ)) {
+		printk(KERN_ERR "%s: irq handler mismatch\n", __FUNCTION__);
+		dump_stack();
+	}
 	return -EBUSY;
 }
 
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 52a8655fa080..134f9f2e0e39 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,6 +1,5 @@
-#include <linux/irq.h>
 
-#if defined(CONFIG_GENERIC_PENDING_IRQ)
+#include <linux/irq.h>
 
 void set_pending_irq(unsigned int irq, cpumask_t mask)
 {
@@ -61,5 +60,3 @@ void move_native_irq(int irq)
 	}
 	cpus_clear(pending_irq_cpumask[irq]);
 }
-
-#endif
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1156eb0977d0..1fbf466a29aa 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -585,6 +585,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 	int i;
 
 	rp->kp.pre_handler = pre_handler_kretprobe;
+	rp->kp.post_handler = NULL;
+	rp->kp.fault_handler = NULL;
+	rp->kp.break_handler = NULL;
 
 	/* Pre-allocate memory for max kretprobe instances */
 	if (rp->maxactive <= 0) {
diff --git a/kernel/panic.c b/kernel/panic.c
index f895c7c01d5b..cc2a4c9c36ac 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,6 @@ static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
 int panic_timeout;
-EXPORT_SYMBOL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 9fd8d4f03595..ce0dfb8f4a4e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -41,7 +41,7 @@ config SOFTWARE_SUSPEND
 	depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
 	---help---
 	  Enable the possibility of suspending the machine.
-	  It doesn't need APM.
+	  It doesn't need ACPI or APM.
 	  You may suspend your machine by 'swsusp' or 'shutdown -z <time>'
 	  (patch for sysvinit needed).
 
diff --git a/kernel/power/main.c b/kernel/power/main.c
index ee371f50ccaa..a6d9ef46009e 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -272,7 +272,7 @@ static ssize_t state_store(struct subsystem * subsys, const char * buf, size_t n
 		if (*s && !strncmp(buf, *s, len))
 			break;
 	}
-	if (*s)
+	if (state < PM_SUSPEND_MAX && *s)
 		error = enter_state(state);
 	else
 		error = -EINVAL;
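Note on the state_store() fix: the loop above advances state and s in lockstep, so when no pm_states[] entry matches, s ends up one past the end of the table and the old `if (*s)` read out of bounds. A sketch of the matching loop with the added guard (declarations assumed from kernel/power/main.c of this era):

	suspend_state_t state = PM_SUSPEND_STANDBY;
	const char * const *s;
	...
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++) {
		if (*s && !strncmp(buf, *s, len))
			break;
	}
	if (state < PM_SUSPEND_MAX && *s)	/* guard: loop may run off the table */
		error = enter_state(state);
	else
		error = -EINVAL;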
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 0f6908cce1dd..84063ac8fcfc 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -75,25 +75,6 @@ struct pm_dev *pm_register(pm_dev_t type,
 	return dev;
 }
 
-/**
- * pm_unregister - unregister a device with power management
- * @dev: device to unregister
- *
- * Remove a device from the power management notification lists. The
- * dev passed must be a handle previously returned by pm_register.
- */
-
-void pm_unregister(struct pm_dev *dev)
-{
-	if (dev) {
-		mutex_lock(&pm_devs_lock);
-		list_del(&dev->entry);
-		mutex_unlock(&pm_devs_lock);
-
-		kfree(dev);
-	}
-}
-
 static void __pm_unregister(struct pm_dev *dev)
 {
 	if (dev) {
@@ -258,7 +239,6 @@ int pm_send_all(pm_request_t rqst, void *data)
 }
 
 EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_unregister);
 EXPORT_SYMBOL(pm_unregister_all);
 EXPORT_SYMBOL(pm_send_all);
 EXPORT_SYMBOL(pm_active);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c5863d02c89e..3eeedbb13b78 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -240,14 +240,15 @@ static void copy_data_pages(struct pbe *pblist)
  *	free_pagedir - free pages allocated with alloc_pagedir()
  */
 
-static void free_pagedir(struct pbe *pblist)
+static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
 {
 	struct pbe *pbe;
 
 	while (pblist) {
 		pbe = (pblist + PB_PAGE_SKIP)->next;
 		ClearPageNosave(virt_to_page(pblist));
-		ClearPageNosaveFree(virt_to_page(pblist));
+		if (clear_nosave_free)
+			ClearPageNosaveFree(virt_to_page(pblist));
 		free_page((unsigned long)pblist);
 		pblist = pbe;
 	}
@@ -389,7 +390,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
 		pbe->next = alloc_image_page(gfp_mask, safe_needed);
 	}
 	if (!pbe) { /* get_zeroed_page() failed */
-		free_pagedir(pblist);
+		free_pagedir(pblist, 1);
 		pblist = NULL;
 	} else
 		create_pbe_list(pblist, nr_pages);
@@ -736,7 +737,7 @@ static int create_image(struct snapshot_handle *handle)
 		pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
 		if (pblist)
 			copy_page_backup_list(pblist, p);
-		free_pagedir(p);
+		free_pagedir(p, 0);
 		if (!pblist)
 			error = -ENOMEM;
 	}
diff --git a/kernel/printk.c b/kernel/printk.c
index 8cc19431e74b..c056f3324432 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -360,8 +360,7 @@ static void call_console_drivers(unsigned long start, unsigned long end)
 	unsigned long cur_index, start_print;
 	static int msg_level = -1;
 
-	if (((long)(start - end)) > 0)
-		BUG();
+	BUG_ON(((long)(start - end)) > 0);
 
 	cur_index = start;
 	start_print = start;
@@ -708,8 +707,7 @@ int __init add_preferred_console(char *name, int idx, char *options)
  */
 void acquire_console_sem(void)
 {
-	if (in_interrupt())
-		BUG();
+	BUG_ON(in_interrupt());
 	down(&console_sem);
 	console_locked = 1;
 	console_may_schedule = 1;
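Several files in this merge (printk.c here, plus ptrace.c, signal.c, and timer.c below) make the same mechanical cleanup: open-coded `if (cond) BUG();` becomes `BUG_ON(cond);`. The two are equivalent; BUG_ON() is the canonical spelling and keeps the branch hint in one place. The generic definition of the era is approximately:

	/* asm-generic/bug.h, approximately; architectures may override it. */
	#define BUG_ON(condition) do { \
		if (unlikely((condition) != 0)) \
			BUG(); \
	} while (0)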
diff --git a/kernel/profile.c b/kernel/profile.c
index 5a730fdb1a2c..68afe121e507 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int __devinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 86a7f6c60cb2..4e0f0ec003f7 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -30,8 +30,7 @@
  */
 void __ptrace_link(task_t *child, task_t *new_parent)
 {
-	if (!list_empty(&child->ptrace_list))
-		BUG();
+	BUG_ON(!list_empty(&child->ptrace_list));
 	if (child->parent == new_parent)
 		return;
 	list_add(&child->ptrace_list, &child->parent->ptrace_children);
@@ -57,10 +56,6 @@ void ptrace_untrace(task_t *child)
 			signal_wake_up(child, 1);
 		}
 	}
-	if (child->signal->flags & SIGNAL_GROUP_EXIT) {
-		sigaddset(&child->pending.signal, SIGKILL);
-		signal_wake_up(child, 1);
-	}
 	spin_unlock(&child->sighand->siglock);
 }
 
@@ -82,7 +77,8 @@ void __ptrace_unlink(task_t *child)
 		add_parent(child);
 	}
 
-	ptrace_untrace(child);
+	if (child->state == TASK_TRACED)
+		ptrace_untrace(child);
 }
 
 /*
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 13458bbaa1be..6d32ff26f948 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -520,7 +520,7 @@ static void __devinit rcu_online_cpu(int cpu)
 	tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
 }
 
-static int __devinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -537,7 +537,7 @@ static int __devinit rcu_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata rcu_nb = {
+static struct notifier_block rcu_nb = {
 	.notifier_call = rcu_cpu_notify,
 };
 
diff --git a/kernel/sched.c b/kernel/sched.c
index dd153d6f8a04..4c64f85698ae 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -665,13 +665,55 @@ static int effective_prio(task_t *p)
 }
 
 /*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switched decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired, and switch periodically
+ * regardless, to ensure that highly interactive tasks do not starve
+ * the less fortunate for unreasonably long periods.
+ */
+static inline int expired_starving(runqueue_t *rq)
+{
+	int limit;
+
+	/*
+	 * Arrays were recently switched, all is well
+	 */
+	if (!rq->expired_timestamp)
+		return 0;
+
+	limit = STARVATION_LIMIT * rq->nr_running;
+
+	/*
+	 * It's time to switch arrays
+	 */
+	if (jiffies - rq->expired_timestamp >= limit)
+		return 1;
+
+	/*
+	 * There's a better selection in the expired array
+	 */
+	if (rq->curr->static_prio > rq->best_expired_prio)
+		return 1;
+
+	/*
+	 * All is well
+	 */
+	return 0;
+}
+
+/*
  * __activate_task - move a task to the runqueue.
  */
 static void __activate_task(task_t *p, runqueue_t *rq)
 {
 	prio_array_t *target = rq->active;
 
-	if (batch_task(p))
+	if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p))))
 		target = rq->expired;
 	enqueue_task(p, target);
 	rq->nr_running++;
@@ -2490,22 +2532,6 @@ unsigned long long current_sched_time(const task_t *tsk)
 }
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
-	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-		(jiffies - (rq)->expired_timestamp >= \
-			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-			((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2640,7 +2666,7 @@ void scheduler_tick(void)
 
 		if (!rq->expired_timestamp)
 			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+		if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
 			enqueue_task(p, rq->expired);
 			if (p->static_prio < rq->best_expired_prio)
 				rq->best_expired_prio = p->static_prio;
@@ -4788,7 +4814,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 /* Register at highest priority so that task migration (migrate_all_tasks)
  * happens before everything else.
  */
-static struct notifier_block __devinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
 	.notifier_call = migration_call,
 	.priority = 10
 };
diff --git a/kernel/signal.c b/kernel/signal.c
index 92025b108791..e5f8aea78ffe 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -769,8 +769,7 @@ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	int ret = 0;
 
-	if (!irqs_disabled())
-		BUG();
+	BUG_ON(!irqs_disabled());
 	assert_spin_locked(&t->sighand->siglock);
 
 	/* Short-circuit ignored signals. */
@@ -869,7 +868,6 @@ __group_complete_signal(int sig, struct task_struct *p)
 	if (t == NULL)
 		/* restart balancing at this thread */
 		t = p->signal->curr_target = p;
-	BUG_ON(t->tgid != p->tgid);
 
 	while (!wants_signal(sig, t)) {
 		t = next_thread(t);
@@ -1384,8 +1382,7 @@ send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
 		 * the overrun count.  Other uses should not try to
 		 * send the signal multiple times.
 		 */
-		if (q->info.si_code != SI_TIMER)
-			BUG();
+		BUG_ON(q->info.si_code != SI_TIMER);
 		q->info.si_overrun++;
 		goto out;
 	}
@@ -1757,9 +1754,9 @@ relock:
 		/* Let the debugger run.  */
 		ptrace_stop(signr, signr, info);
 
-		/* We're back.  Did the debugger cancel the sig or group_exit? */
+		/* We're back.  Did the debugger cancel the sig? */
 		signr = current->exit_code;
-		if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
+		if (signr == 0)
 			continue;
 
 		current->exit_code = 0;
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ec8fed42a86f..336f92d64e2e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
@@ -484,7 +484,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index ced91e1ff564..14c7faf02909 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
 /*
  * Create/destroy watchdog threads as CPUs come and go:
  */
-static int __devinit
+static int
 cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int hotcpu = (unsigned long)hcpu;
@@ -140,7 +140,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d82864c4a617..5433195040f1 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -120,3 +120,15 @@ cond_syscall(sys32_sysctl);
 cond_syscall(ppc_rtas);
 cond_syscall(sys_spu_run);
 cond_syscall(sys_spu_create);
+
+/* mmu depending weak syscall entries */
+cond_syscall(sys_mprotect);
+cond_syscall(sys_msync);
+cond_syscall(sys_mlock);
+cond_syscall(sys_munlock);
+cond_syscall(sys_mlockall);
+cond_syscall(sys_munlockall);
+cond_syscall(sys_mincore);
+cond_syscall(sys_madvise);
+cond_syscall(sys_mremap);
+cond_syscall(sys_remap_file_pages);
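Note on the sys_ni.c additions: cond_syscall() emits a weak alias to sys_ni_syscall(), so each syscall listed above resolves to a stub returning -ENOSYS whenever the real implementation is not built - here, the mm syscalls on !CONFIG_MMU configurations. The mechanism is approximately (from include/linux/linkage.h of this era; treat the exact wording as an assumption):

	#define cond_syscall(x) \
		asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")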
diff --git a/kernel/time.c b/kernel/time.c
index ff8e7019c4c4..b00ddc71cedb 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -410,7 +410,7 @@ EXPORT_SYMBOL(current_kernel_time);
  * current_fs_time - Return FS time
  * @sb: Superblock.
  *
- * Return the current time truncated to the time granuality supported by
+ * Return the current time truncated to the time granularity supported by
  * the fs.
  */
 struct timespec current_fs_time(struct super_block *sb)
@@ -421,11 +421,11 @@ struct timespec current_fs_time(struct super_block *sb)
 EXPORT_SYMBOL(current_fs_time);
 
 /**
- * timespec_trunc - Truncate timespec to a granuality
+ * timespec_trunc - Truncate timespec to a granularity
  * @t: Timespec
- * @gran: Granuality in ns.
+ * @gran: Granularity in ns.
  *
- * Truncate a timespec to a granuality. gran must be smaller than a second.
+ * Truncate a timespec to a granularity. gran must be smaller than a second.
  * Always rounds down.
  *
  * This function should be only used for timestamps returned by
diff --git a/kernel/timer.c b/kernel/timer.c
index 6b812c04737b..67eaf0f54096 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
 } ____cacheline_aligned_in_smp;
 
 typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
 tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
 
 static inline void set_running_timer(tvec_base_t *base,
 					struct timer_list *timer)
@@ -1224,28 +1225,36 @@ static int __devinit init_timers_cpu(int cpu)
 {
 	int j;
 	tvec_base_t *base;
+	static char __devinitdata tvec_base_done[NR_CPUS];
 
-	base = per_cpu(tvec_bases, cpu);
-	if (!base) {
+	if (!tvec_base_done[cpu]) {
 		static char boot_done;
 
-		/*
-		 * Cannot do allocation in init_timers as that runs before the
-		 * allocator initializes (and would waste memory if there are
-		 * more possible CPUs than will ever be installed/brought up).
-		 */
 		if (boot_done) {
+			/*
+			 * The APs use this path later in boot
+			 */
 			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
 			memset(base, 0, sizeof(*base));
+			per_cpu(tvec_bases, cpu) = base;
 		} else {
-			base = &boot_tvec_bases;
+			/*
+			 * This is for the boot CPU - we use compile-time
+			 * static initialisation because per-cpu memory isn't
+			 * ready yet and because the memory allocators are not
+			 * initialised either.
+			 */
 			boot_done = 1;
+			base = &boot_tvec_bases;
 		}
-		per_cpu(tvec_bases, cpu) = base;
+		tvec_base_done[cpu] = 1;
+	} else {
+		base = per_cpu(tvec_bases, cpu);
 	}
+
 	spin_lock_init(&base->lock);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1305,7 +1314,7 @@ static void __devinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __devinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1325,7 +1334,7 @@ static int __devinit timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __devinitdata timers_nb = {
+static struct notifier_block timers_nb = {
 	.notifier_call = timer_cpu_notify,
 };
 
@@ -1455,7 +1464,7 @@ static void time_interpolator_update(long delta_nsec)
 	 */
 	if (jiffies % INTERPOLATOR_ADJUST == 0)
 	{
-		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
 			time_interpolator->nsec_per_cyc--;
 		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
 			time_interpolator->nsec_per_cyc++;
@@ -1479,8 +1488,7 @@ register_time_interpolator(struct time_interpolator *ti)
 	unsigned long flags;
 
 	/* Sanity check */
-	if (ti->frequency == 0 || ti->mask == 0)
-		BUG();
+	BUG_ON(ti->frequency == 0 || ti->mask == 0);
 
 	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
 	spin_lock(&time_interpolator_lock);
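Note on the timer.c change: DEFINE_PER_CPU variables accept a compile-time initializer, so every CPU's tvec_bases pointer now starts out aimed at the statically allocated boot_tvec_bases. Timers therefore work before any allocator is up, and init_timers_cpu() later repoints secondary CPUs at kmalloc_node()'d bases. A minimal sketch of the pattern with illustrative names (only DEFINE_PER_CPU/per_cpu are real API here):

	struct base { long state; };

	static struct base boot_base;	/* usable from time zero */
	static DEFINE_PER_CPU(struct base *, bases) = { &boot_base };

	static struct base *get_base(int cpu)
	{
		return per_cpu(bases, cpu);	/* never NULL, even in early boot */
	}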
diff --git a/kernel/uid16.c b/kernel/uid16.c
index aa25605027c8..187e2a423878 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -20,43 +20,67 @@
 
 asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group)
 {
-	return sys_chown(filename, low2highuid(user), low2highgid(group));
+	long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group)
 {
-	return sys_lchown(filename, low2highuid(user), low2highgid(group));
+	long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
 {
-	return sys_fchown(fd, low2highuid(user), low2highgid(group));
+	long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
 {
-	return sys_setregid(low2highgid(rgid), low2highgid(egid));
+	long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setgid16(old_gid_t gid)
 {
-	return sys_setgid(low2highgid(gid));
+	long ret = sys_setgid(low2highgid(gid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
 {
-	return sys_setreuid(low2highuid(ruid), low2highuid(euid));
+	long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setuid16(old_uid_t uid)
 {
-	return sys_setuid(low2highuid(uid));
+	long ret = sys_setuid(low2highuid(uid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
 {
-	return sys_setresuid(low2highuid(ruid), low2highuid(euid),
+	long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
 		low2highuid(suid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
@@ -72,8 +96,11 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid,
 
 asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
 {
-	return sys_setresgid(low2highgid(rgid), low2highgid(egid),
+	long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
 		low2highgid(sgid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
@@ -89,12 +116,18 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid,
 
 asmlinkage long sys_setfsuid16(old_uid_t uid)
 {
-	return sys_setfsuid(low2highuid(uid));
+	long ret = sys_setfsuid(low2highuid(uid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setfsgid16(old_gid_t gid)
 {
-	return sys_setfsgid(low2highgid(gid));
+	long ret = sys_setfsgid(low2highgid(gid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 static int groups16_to_user(old_gid_t __user *grouplist,
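Note on the uid16.c pattern: these wrappers just forward to the real syscall, and with -mregparm on x86 gcc would compile `return sys_chown(...)` as a tail jump. The callee would then treat the caller's stack argument slots as its own - but for a syscall those slots are the saved user registers of the entry frame, which the exit path restores, so a tail call can corrupt user state. prevent_tail_call() forces the return value through a register after a real call. Its definitions of the era, approximately (the i386 one from include/asm-i386/system.h, plus the generic no-op fallback; treat both as assumptions):

	/* i386: empty asm that consumes and reproduces ret in a register,
	 * forcing a real call followed by a move - no tail jump. */
	#define prevent_tail_call(ret) __asm__ ("" : "=r" (ret) : "0" (ret))

	/* generic fallback where tail calls are harmless: */
	#define prevent_tail_call(ret) do { } while (0)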
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e9e464a90376..880fb415a8f6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -547,7 +547,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 }
 
 /* We're holding the cpucontrol mutex here */
-static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+static int workqueue_cpu_callback(struct notifier_block *nfb,
 					    unsigned long action,
 					    void *hcpu)
 {