author     Steven Whitehouse <swhiteho@redhat.com>   2006-04-21 12:52:36 -0400
committer  Steven Whitehouse <swhiteho@redhat.com>   2006-04-21 12:52:36 -0400
commit     a748422ee45725e04e1d3792fa19dfa90ddfd116 (patch)
tree       978e12895468baaa9f7ab2747b9f7d50beaf1717 /kernel
parent     c63e31c2cc1ec67372920b5e1aff8204d04dd172 (diff)
parent     f4ffaa452e71495a06376f12f772342bc57051fc (diff)
Merge branch 'master'
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c            6
-rw-r--r--  kernel/fork.c            9
-rw-r--r--  kernel/hrtimer.c         1
-rw-r--r--  kernel/irq/Makefile      3
-rw-r--r--  kernel/irq/migration.c   5
-rw-r--r--  kernel/kprobes.c         3
-rw-r--r--  kernel/panic.c           1
-rw-r--r--  kernel/power/pm.c       20
-rw-r--r--  kernel/power/snapshot.c  9
-rw-r--r--  kernel/ptrace.c          7
-rw-r--r--  kernel/sched.c          62
-rw-r--r--  kernel/signal.c          5
-rw-r--r--  kernel/sys_ni.c         12
-rw-r--r--  kernel/timer.c          31
-rw-r--r--  kernel/uid16.c          59
15 files changed, 145 insertions(+), 88 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 6c2eeb8f6390..f86434d7b3d1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/futex.h>
 #include <linux/compat.h>
+#include <linux/pipe_fs_i.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -55,7 +56,7 @@ static void __unhash_process(struct task_struct *p)
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
 
-		list_del_init(&p->tasks);
+		list_del_rcu(&p->tasks);
 		__get_cpu_var(process_counts)--;
 	}
 	list_del_rcu(&p->thread_group);
@@ -941,6 +942,9 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (tsk->io_context)
 		exit_io_context();
 
+	if (tsk->splice_pipe)
+		__free_pipe_info(tsk->splice_pipe);
+
 	/* PF_DEAD causes final put_task_struct after we schedule. */
 	preempt_disable();
 	BUG_ON(tsk->flags & PF_DEAD);
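
The do_exit() hunk above tears down the per-task splice pipe that fs/splice.c caches lazily on first use; the fork.c diff below completes the lifecycle by starting every new task with the field NULLed in dup_task_struct(). A minimal sketch of that lazy-cache shape, assuming the era's alloc_pipe_info() helper from fs/pipe.c (illustrative, not the exact splice code):

/* Hedged sketch: lazy per-task pipe cache managed by the hunk above
 * and by the dup_task_struct() hunk in kernel/fork.c below. */
static struct pipe_inode_info *task_splice_pipe(struct task_struct *tsk)
{
	if (!tsk->splice_pipe)			/* first splice by this task */
		tsk->splice_pipe = alloc_pipe_info(NULL);
	return tsk->splice_pipe;		/* NULL here means -ENOMEM */
}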
diff --git a/kernel/fork.c b/kernel/fork.c
index 3384eb89cb1c..d2fa57d480d4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -124,12 +124,6 @@ void __put_task_struct(struct task_struct *tsk)
 	free_task(tsk);
 }
 
-void __put_task_struct_cb(struct rcu_head *rhp)
-{
-	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
-	__put_task_struct(tsk);
-}
-
 void __init fork_init(unsigned long mempages)
 {
 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
@@ -186,6 +180,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	atomic_set(&tsk->usage,2);
 	atomic_set(&tsk->fs_excl, 0);
 	tsk->btrace_seq = 0;
+	tsk->splice_pipe = NULL;
 	return tsk;
 }
 
@@ -1210,7 +1205,7 @@ static task_t *copy_process(unsigned long clone_flags,
 		attach_pid(p, PIDTYPE_PGID, process_group(p));
 		attach_pid(p, PIDTYPE_SID, p->signal->session);
 
-		list_add_tail(&p->tasks, &init_task.tasks);
+		list_add_tail_rcu(&p->tasks, &init_task.tasks);
 		__get_cpu_var(process_counts)++;
 	}
 	attach_pid(p, PIDTYPE_PID, p->pid);
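
Taken together with the exit.c hunk, fork.c converts membership of the init_task.tasks list to RCU primitives: writers publish with list_add_tail_rcu() and retract with list_del_rcu(), so readers can walk the global task list under rcu_read_lock() instead of taking tasklist_lock. A minimal reader sketch of the pattern these hunks enable (illustrative; not the kernel's own iterator helpers):

/* list_del_rcu() leaves the removed node's ->next intact, so a reader
 * that races with removal still walks off the node safely; the
 * task_struct itself must only be freed after an RCU grace period. */
static int count_tasks_rcu(void)
{
	struct task_struct *p;
	int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(p, &init_task.tasks, tasks)
		n++;
	rcu_read_unlock();
	return n;
}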
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f181ff4dd32e..d2a7296c8251 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,7 @@ int hrtimer_cancel(struct hrtimer *timer)
 
 		if (ret >= 0)
 			return ret;
+		cpu_relax();
 	}
 }
 
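The hrtimer_cancel() loop spins until hrtimer_try_to_cancel() stops reporting that the timer callback is running on another CPU; the added cpu_relax() turns the bare busy-wait into a proper spin-wait (a pause/yield hint on most architectures). The surrounding function, reconstructed for context (this matches the shape of the code the hunk touches):

int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)		/* 0: not active, 1: cancelled */
			return ret;
		cpu_relax();		/* -1: callback running; spin politely */
	}
}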
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 2b33f852be3e..9f77f50d8143 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,4 +1,5 @@
 
-obj-y := handle.o manage.o spurious.o migration.o
+obj-y := handle.o manage.o spurious.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 52a8655fa080..134f9f2e0e39 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -1,6 +1,5 @@
-#include <linux/irq.h>
 
-#if defined(CONFIG_GENERIC_PENDING_IRQ)
+#include <linux/irq.h>
 
 void set_pending_irq(unsigned int irq, cpumask_t mask)
 {
@@ -61,5 +60,3 @@ void move_native_irq(int irq)
 	}
 	cpus_clear(pending_irq_cpumask[irq]);
 }
-
-#endif
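
The Makefile and migration.c hunks make one change between them: instead of compiling migration.c unconditionally and letting an in-file #if defined(CONFIG_GENERIC_PENDING_IRQ) guard discard its contents, the object is now selected at build time with obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o and the source needs no guard at all. The resulting file shape (body elided):

/* kernel/irq/migration.c after the change: built only when
 * CONFIG_GENERIC_PENDING_IRQ=y, so no preprocessor wrapper. */

#include <linux/irq.h>

void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	/* ... */
}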
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1156eb0977d0..1fbf466a29aa 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -585,6 +585,9 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 	int i;
 
 	rp->kp.pre_handler = pre_handler_kretprobe;
+	rp->kp.post_handler = NULL;
+	rp->kp.fault_handler = NULL;
+	rp->kp.break_handler = NULL;
 
 	/* Pre-allocate memory for max kretprobe instances */
 	if (rp->maxactive <= 0) {
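
register_kretprobe() arms rp->kp as an ordinary kprobe whose pre_handler fires on function entry; the other three handler slots are unused for kretprobes, so stale pointers left in a reused or stack-allocated struct kretprobe could be called through. The hunk clears them explicitly. A hedged usage sketch against the 2.6-era API (the probed address is a placeholder; statically initialised structs were already zeroed, which is why reuse is the interesting case):

#include <linux/kprobes.h>

static int my_ret_handler(struct kretprobe_instance *ri,
			  struct pt_regs *regs)
{
	/* runs when the probed function returns */
	return 0;
}

static struct kretprobe my_rp = {
	.handler   = my_ret_handler,
	.maxactive = 20,	/* instances pre-allocated by register_kretprobe() */
};

static int __init my_probe_init(void)
{
	my_rp.kp.addr = (kprobe_opcode_t *)0;	/* placeholder: target elided */
	return register_kretprobe(&my_rp);
}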
diff --git a/kernel/panic.c b/kernel/panic.c
index f895c7c01d5b..cc2a4c9c36ac 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,6 @@ static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
 
 int panic_timeout;
-EXPORT_SYMBOL(panic_timeout);
 
 ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
 
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
index 0f6908cce1dd..84063ac8fcfc 100644
--- a/kernel/power/pm.c
+++ b/kernel/power/pm.c
@@ -75,25 +75,6 @@ struct pm_dev *pm_register(pm_dev_t type,
 	return dev;
 }
 
-/**
- *	pm_unregister - unregister a device with power management
- *	@dev: device to unregister
- *
- *	Remove a device from the power management notification lists. The
- *	dev passed must be a handle previously returned by pm_register.
- */
-
-void pm_unregister(struct pm_dev *dev)
-{
-	if (dev) {
-		mutex_lock(&pm_devs_lock);
-		list_del(&dev->entry);
-		mutex_unlock(&pm_devs_lock);
-
-		kfree(dev);
-	}
-}
-
 static void __pm_unregister(struct pm_dev *dev)
 {
 	if (dev) {
@@ -258,7 +239,6 @@ int pm_send_all(pm_request_t rqst, void *data)
 }
 
 EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_unregister);
 EXPORT_SYMBOL(pm_unregister_all);
 EXPORT_SYMBOL(pm_send_all);
 EXPORT_SYMBOL(pm_active);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c5863d02c89e..3eeedbb13b78 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -240,14 +240,15 @@ static void copy_data_pages(struct pbe *pblist)
  *	free_pagedir - free pages allocated with alloc_pagedir()
  */
 
-static void free_pagedir(struct pbe *pblist)
+static void free_pagedir(struct pbe *pblist, int clear_nosave_free)
 {
 	struct pbe *pbe;
 
 	while (pblist) {
 		pbe = (pblist + PB_PAGE_SKIP)->next;
 		ClearPageNosave(virt_to_page(pblist));
-		ClearPageNosaveFree(virt_to_page(pblist));
+		if (clear_nosave_free)
+			ClearPageNosaveFree(virt_to_page(pblist));
 		free_page((unsigned long)pblist);
 		pblist = pbe;
 	}
@@ -389,7 +390,7 @@ struct pbe *alloc_pagedir(unsigned int nr_pages, gfp_t gfp_mask, int safe_needed
 		pbe->next = alloc_image_page(gfp_mask, safe_needed);
 	}
 	if (!pbe) { /* get_zeroed_page() failed */
-		free_pagedir(pblist);
+		free_pagedir(pblist, 1);
 		pblist = NULL;
 	} else
 		create_pbe_list(pblist, nr_pages);
@@ -736,7 +737,7 @@ static int create_image(struct snapshot_handle *handle)
 		pblist = alloc_pagedir(nr_copy_pages, GFP_ATOMIC, 1);
 		if (pblist)
 			copy_page_backup_list(pblist, p);
-		free_pagedir(p);
+		free_pagedir(p, 0);
 		if (!pblist)
 			error = -ENOMEM;
 	}
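
For readers of the free_pagedir() walker above: the page directory is a chain of whole pages, each holding an array of struct pbe entries, and the link to the next page is stored in the last entry of the current page; that is what (pblist + PB_PAGE_SKIP)->next dereferences. A one-line restatement, assuming the PB_PAGE_SKIP definition from kernel/power/power.h of this era (PBES_PER_PAGE - 1):

/* Hedged sketch: advance from one pagedir page to the next. */
static struct pbe *next_pagedir_page(struct pbe *page_start)
{
	return (page_start + PB_PAGE_SKIP)->next;	/* link lives in last pbe */
}

The new clear_nosave_free flag exists because the two callers differ: alloc_pagedir()'s failure path must undo the PageNosaveFree markings it made, while create_image() frees the old directory without disturbing those bits.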
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 0eeb7e66722c..4e0f0ec003f7 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -56,10 +56,6 @@ void ptrace_untrace(task_t *child)
 			signal_wake_up(child, 1);
 		}
 	}
-	if (child->signal->flags & SIGNAL_GROUP_EXIT) {
-		sigaddset(&child->pending.signal, SIGKILL);
-		signal_wake_up(child, 1);
-	}
 	spin_unlock(&child->sighand->siglock);
 }
 
@@ -81,7 +77,8 @@ void __ptrace_unlink(task_t *child)
 		add_parent(child);
 	}
 
-	ptrace_untrace(child);
+	if (child->state == TASK_TRACED)
+		ptrace_untrace(child);
 }
 
 /*
diff --git a/kernel/sched.c b/kernel/sched.c
index dd153d6f8a04..365f0b90b4de 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -665,13 +665,55 @@ static int effective_prio(task_t *p)
 }
 
 /*
+ * We place interactive tasks back into the active array, if possible.
+ *
+ * To guarantee that this does not starve expired tasks we ignore the
+ * interactivity of a task if the first expired task had to wait more
+ * than a 'reasonable' amount of time. This deadline timeout is
+ * load-dependent, as the frequency of array switched decreases with
+ * increasing number of running tasks. We also ignore the interactivity
+ * if a better static_prio task has expired, and switch periodically
+ * regardless, to ensure that highly interactive tasks do not starve
+ * the less fortunate for unreasonably long periods.
+ */
+static inline int expired_starving(runqueue_t *rq)
+{
+	int limit;
+
+	/*
+	 * Arrays were recently switched, all is well
+	 */
+	if (!rq->expired_timestamp)
+		return 0;
+
+	limit = STARVATION_LIMIT * rq->nr_running;
+
+	/*
+	 * It's time to switch arrays
+	 */
+	if (jiffies - rq->expired_timestamp >= limit)
+		return 1;
+
+	/*
+	 * There's a better selection in the expired array
+	 */
+	if (rq->curr->static_prio > rq->best_expired_prio)
+		return 1;
+
+	/*
+	 * All is well
+	 */
+	return 0;
+}
+
+/*
  * __activate_task - move a task to the runqueue.
  */
 static void __activate_task(task_t *p, runqueue_t *rq)
 {
 	prio_array_t *target = rq->active;
 
-	if (batch_task(p))
+	if (unlikely(batch_task(p) || (expired_starving(rq) && !rt_task(p))))
 		target = rq->expired;
 	enqueue_task(p, target);
 	rq->nr_running++;
@@ -2490,22 +2532,6 @@ unsigned long long current_sched_time(const task_t *tsk)
 }
 
 /*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
-	((STARVATION_LIMIT && ((rq)->expired_timestamp && \
-		(jiffies - (rq)->expired_timestamp >= \
-			STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
-		((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -2640,7 +2666,7 @@ void scheduler_tick(void)
 
 		if (!rq->expired_timestamp)
 			rq->expired_timestamp = jiffies;
-		if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
+		if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
 			enqueue_task(p, rq->expired);
 			if (p->static_prio < rq->best_expired_prio)
 				rq->best_expired_prio = p->static_prio;
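
The sched.c diff replaces the EXPIRED_STARVING() macro with the static inline expired_starving() shown above and reuses it from __activate_task(), so batch or interactive wakeups land on the expired array when expired tasks are already starving (RT tasks excepted). Beyond reuse, inline functions avoid the usual macro traps; a toy illustration (not kernel code):

/* A macro evaluates its argument textually, so side effects run per
 * mention and precedence bugs hide in the expansion: */
#define TWICE_BAD(x)	((x) + (x))

static inline int twice_good(int x)
{
	return x + x;
}

/* TWICE_BAD(i++) increments i twice; twice_good(i++) increments once,
 * and the inline form is type-checked like any function. */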
diff --git a/kernel/signal.c b/kernel/signal.c
index 5ccaac505e8d..e5f8aea78ffe 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -868,7 +868,6 @@ __group_complete_signal(int sig, struct task_struct *p)
 		if (t == NULL)
 			/* restart balancing at this thread */
 			t = p->signal->curr_target = p;
-		BUG_ON(t->tgid != p->tgid);
 
 		while (!wants_signal(sig, t)) {
 			t = next_thread(t);
@@ -1755,9 +1754,9 @@ relock:
 		/* Let the debugger run. */
 		ptrace_stop(signr, signr, info);
 
-		/* We're back. Did the debugger cancel the sig or group_exit? */
+		/* We're back. Did the debugger cancel the sig? */
 		signr = current->exit_code;
-		if (signr == 0 || current->signal->flags & SIGNAL_GROUP_EXIT)
+		if (signr == 0)
 			continue;
 
 		current->exit_code = 0;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index d82864c4a617..5433195040f1 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -120,3 +120,15 @@ cond_syscall(sys32_sysctl);
 cond_syscall(ppc_rtas);
 cond_syscall(sys_spu_run);
 cond_syscall(sys_spu_create);
+
+/* mmu depending weak syscall entries */
+cond_syscall(sys_mprotect);
+cond_syscall(sys_msync);
+cond_syscall(sys_mlock);
+cond_syscall(sys_munlock);
+cond_syscall(sys_mlockall);
+cond_syscall(sys_munlockall);
+cond_syscall(sys_mincore);
+cond_syscall(sys_madvise);
+cond_syscall(sys_mremap);
+cond_syscall(sys_remap_file_pages);
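
cond_syscall() lets the syscall tables reference entry points that a given configuration may not build: each listed symbol becomes a weak alias for sys_ni_syscall(), which simply returns -ENOSYS, and a real definition overrides the alias when the code is compiled in. The block added here covers the mm syscalls so nommu kernels, which omit the mm/ implementations, still link. A hedged C-level sketch of the mechanism (the kernel's actual macro lives in the linkage headers and may be spelled with .weak/.set assembler directives instead, partly because C-level aliases insist on matching prototypes):

asmlinkage long sys_ni_syscall(void)
{
	return -ENOSYS;			/* "function not implemented" */
}

/* If no real sys_mlock is linked (e.g. a nommu build), the weak
 * alias stands in and callers get -ENOSYS: */
asmlinkage long sys_mlock(unsigned long start, size_t len)
	__attribute__((weak, alias("sys_ni_syscall")));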
diff --git a/kernel/timer.c b/kernel/timer.c
index c3a874f1393c..883773788836 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -81,9 +81,10 @@ struct tvec_t_base_s {
 } ____cacheline_aligned_in_smp;
 
 typedef struct tvec_t_base_s tvec_base_t;
-static DEFINE_PER_CPU(tvec_base_t *, tvec_bases);
+
 tvec_base_t boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };
 
 static inline void set_running_timer(tvec_base_t *base,
 			struct timer_list *timer)
@@ -1224,28 +1225,36 @@ static int __devinit init_timers_cpu(int cpu)
 {
 	int j;
 	tvec_base_t *base;
+	static char __devinitdata tvec_base_done[NR_CPUS];
 
-	base = per_cpu(tvec_bases, cpu);
-	if (!base) {
+	if (!tvec_base_done[cpu]) {
 		static char boot_done;
 
-		/*
-		 * Cannot do allocation in init_timers as that runs before the
-		 * allocator initializes (and would waste memory if there are
-		 * more possible CPUs than will ever be installed/brought up).
-		 */
 		if (boot_done) {
+			/*
+			 * The APs use this path later in boot
+			 */
 			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
 						cpu_to_node(cpu));
 			if (!base)
 				return -ENOMEM;
 			memset(base, 0, sizeof(*base));
+			per_cpu(tvec_bases, cpu) = base;
 		} else {
-			base = &boot_tvec_bases;
+			/*
+			 * This is for the boot CPU - we use compile-time
+			 * static initialisation because per-cpu memory isn't
+			 * ready yet and because the memory allocators are not
+			 * initialised either.
+			 */
 			boot_done = 1;
+			base = &boot_tvec_bases;
 		}
-		per_cpu(tvec_bases, cpu) = base;
+		tvec_base_done[cpu] = 1;
+	} else {
+		base = per_cpu(tvec_bases, cpu);
 	}
+
 	spin_lock_init(&base->lock);
 	for (j = 0; j < TVN_SIZE; j++) {
 		INIT_LIST_HEAD(base->tv5.vec + j);
@@ -1455,7 +1464,7 @@ static void time_interpolator_update(long delta_nsec)
 	 */
 	if (jiffies % INTERPOLATOR_ADJUST == 0)
 	{
-		if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
+		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
 			time_interpolator->nsec_per_cyc--;
 		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
 			time_interpolator->nsec_per_cyc++;
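
The timer.c change works because a per-CPU pointer can be given a compile-time initial value: every CPU's tvec_bases slot starts out pointing at the static boot_tvec_bases, so the boot CPU has a usable timer base before any allocator exists, and init_timers_cpu() later installs a kmalloc_node() base per secondary CPU. Since the pointer is never NULL any more, the new tvec_base_done[] array replaces the old NULL check as the "already initialised" marker. The idiom in isolation (a hedged sketch with made-up names):

static struct my_base boot_base;	/* static: valid before allocators */
static DEFINE_PER_CPU(struct my_base *, bases) = { &boot_base };

static int bring_up_cpu(int cpu)
{
	struct my_base *b = kmalloc_node(sizeof(*b), GFP_KERNEL,
					 cpu_to_node(cpu));
	if (!b)
		return -ENOMEM;
	memset(b, 0, sizeof(*b));
	per_cpu(bases, cpu) = b;	/* this CPU now owns a private base */
	return 0;
}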
diff --git a/kernel/uid16.c b/kernel/uid16.c
index aa25605027c8..187e2a423878 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -20,43 +20,67 @@
 
 asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group)
 {
-	return sys_chown(filename, low2highuid(user), low2highgid(group));
+	long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group)
 {
-	return sys_lchown(filename, low2highuid(user), low2highgid(group));
+	long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
 {
-	return sys_fchown(fd, low2highuid(user), low2highgid(group));
+	long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
 {
-	return sys_setregid(low2highgid(rgid), low2highgid(egid));
+	long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setgid16(old_gid_t gid)
 {
-	return sys_setgid(low2highgid(gid));
+	long ret = sys_setgid(low2highgid(gid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
 {
-	return sys_setreuid(low2highuid(ruid), low2highuid(euid));
+	long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setuid16(old_uid_t uid)
 {
-	return sys_setuid(low2highuid(uid));
+	long ret = sys_setuid(low2highuid(uid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
 {
-	return sys_setresuid(low2highuid(ruid), low2highuid(euid),
-		low2highuid(suid));
+	long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
+				 low2highuid(suid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid)
@@ -72,8 +96,11 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid,
 
 asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
 {
-	return sys_setresgid(low2highgid(rgid), low2highgid(egid),
-		low2highgid(sgid));
+	long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
+				 low2highgid(sgid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid)
@@ -89,12 +116,18 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid,
 
 asmlinkage long sys_setfsuid16(old_uid_t uid)
 {
-	return sys_setfsuid(low2highuid(uid));
+	long ret = sys_setfsuid(low2highuid(uid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 asmlinkage long sys_setfsgid16(old_gid_t gid)
 {
-	return sys_setfsgid(low2highgid(gid));
+	long ret = sys_setfsgid(low2highgid(gid));
+	/* avoid REGPARM breakage on x86: */
+	prevent_tail_call(ret);
+	return ret;
 }
 
 static int groups16_to_user(old_gid_t __user *grouplist,
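
Every wrapper in uid16.c now stores the callee's result and runs prevent_tail_call(ret) before returning. The motivation is x86 built with -mregparm: a sibling (tail) call lets the callee reuse the caller's stack slots, and for syscall entry paths those slots are the user-visible argument area, so the clobber corrupts syscall state. Forcing ret through an optimisation barrier makes GCC keep a real call frame. A hedged sketch of how such a barrier can be spelled (the real definition is per-architecture; this is one plausible i386-era form):

/* The empty asm ties `ret` to a register the optimiser cannot see
 * through, so the final call cannot become a jmp that reuses the
 * caller's argument stack. */
#define prevent_tail_call_sketch(ret) \
	__asm__ ("" : "=r" (ret) : "0" (ret))

asmlinkage long sys_setgid16_sketch(old_gid_t gid)
{
	long ret = sys_setgid(low2highgid(gid));

	prevent_tail_call_sketch(ret);	/* keep a real call frame here */
	return ret;
}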