author		Ingo Molnar <mingo@elte.hu>	2009-02-07 12:31:54 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-02-07 12:31:54 -0500
commit		673f8205914a12e928c65afbcd78ae748f78df53 (patch)
tree		38c60215646d079fab3bff812e094e914960c7ec /kernel
parent		cf47b8f3d96b0b8b10b557444a28b3ca4024ff82 (diff)
parent		ae1a25da8448271a99745da03100d5299575a269 (diff)

Merge branch 'linus' into core/locking

Conflicts:
	fs/btrfs/locking.c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                     |  5
-rw-r--r--  kernel/acct.c                       |  2
-rw-r--r--  kernel/async.c                      | 12
-rw-r--r--  kernel/capability.c                 |  4
-rw-r--r--  kernel/cgroup.c                     | 28
-rw-r--r--  kernel/cpuset.c                     | 15
-rw-r--r--  kernel/dma-coherent.c               | 47
-rw-r--r--  kernel/exec_domain.c                |  3
-rw-r--r--  kernel/exit.c                       | 17
-rw-r--r--  kernel/fork.c                       | 21
-rw-r--r--  kernel/futex.c                      | 17
-rw-r--r--  kernel/hrtimer.c                    | 49
-rw-r--r--  kernel/irq/chip.c                   |  2
-rw-r--r--  kernel/irq/handle.c                 | 16
-rw-r--r--  kernel/irq/manage.c                 | 10
-rw-r--r--  kernel/irq/numa_migrate.c           |  7
-rw-r--r--  kernel/itimer.c                     |  7
-rw-r--r--  kernel/kallsyms.c                   | 16
-rw-r--r--  kernel/kexec.c                      |  5
-rw-r--r--  kernel/kprobes.c                    |  2
-rw-r--r--  kernel/module.c                     | 45
-rw-r--r--  kernel/posix-cpu-timers.c           | 70
-rw-r--r--  kernel/posix-timers.c               | 43
-rw-r--r--  kernel/power/Makefile               |  3
-rw-r--r--  kernel/power/disk.c                 | 10
-rw-r--r--  kernel/printk.c                     |  7
-rw-r--r--  kernel/ptrace.c                     |  2
-rw-r--r--  kernel/rcuclassic.c                 |  2
-rw-r--r--  kernel/rcutree.c                    |  2
-rw-r--r--  kernel/relay.c                      |  4
-rw-r--r--  kernel/resource.c                   |  1
-rw-r--r--  kernel/sched.c                      | 57
-rw-r--r--  kernel/sched_fair.c                 | 69
-rw-r--r--  kernel/sched_rt.c                   |  4
-rw-r--r--  kernel/sched_stats.h                | 33
-rw-r--r--  kernel/signal.c                     | 61
-rw-r--r--  kernel/smp.c                        | 36
-rw-r--r--  kernel/softlockup.c                 |  9
-rw-r--r--  kernel/sys.c                        | 86
-rw-r--r--  kernel/sys_ni.c                     |  1
-rw-r--r--  kernel/sysctl.c                     | 15
-rw-r--r--  kernel/time.c                       | 14
-rw-r--r--  kernel/time/tick-common.c           | 26
-rw-r--r--  kernel/time/tick-sched.c            |  2
-rw-r--r--  kernel/timer.c                      | 18
-rw-r--r--  kernel/trace/ftrace.c               | 32
-rw-r--r--  kernel/trace/ring_buffer.c          | 15
-rw-r--r--  kernel/trace/trace.c                |  5
-rw-r--r--  kernel/trace/trace_irqsoff.c        |  1
-rw-r--r--  kernel/trace/trace_sched_wakeup.c   |  1
-rw-r--r--  kernel/uid16.c                      | 39
-rw-r--r--  kernel/wait.c                       | 59
-rw-r--r--  kernel/workqueue.c                  | 20
53 files changed, 637 insertions(+), 440 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 2aebc4cd7878..170a9213c1b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,9 +40,8 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
-obj-y += smp.o
-else
+obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
 obj-$(CONFIG_SMP) += spinlock.o
diff --git a/kernel/acct.c b/kernel/acct.c
index d57b7cbb98b6..7afa31564162 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -277,7 +277,7 @@ static int acct_on(char *name)
  * should be written. If the filename is NULL, accounting will be
  * shutdown.
  */
-asmlinkage long sys_acct(const char __user *name)
+SYSCALL_DEFINE1(acct, const char __user *, name)
 {
 	int error;
 
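A note on the conversion pattern above, which recurs throughout this merge: open-coded asmlinkage long sys_foo(...) definitions become SYSCALL_DEFINEn() wrapper macros from include/linux/syscalls.h. Roughly (a simplified sketch; the real macros additionally emit sign-extension stubs on architectures that select CONFIG_HAVE_SYSCALL_WRAPPERS, so 32-bit arguments arriving in 64-bit registers are extended properly):

	#define SYSCALL_DEFINE1(name, ...) SYSCALL_DEFINEx(1, _##name, __VA_ARGS__)

	/* Without CONFIG_HAVE_SYSCALL_WRAPPERS this is a plain definition: */
	#define SYSCALL_DEFINEx(x, name, ...) \
		asmlinkage long sys##name(__SC_DECL##x(__VA_ARGS__))

	/* __SC_DECL1(t1, a1) expands to "t1 a1", so the hunk above still
	 * produces:  asmlinkage long sys_acct(const char __user *name)  */
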
diff --git a/kernel/async.c b/kernel/async.c
index 608b32b42812..67a2be71f517 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -138,15 +138,18 @@ static void run_one_entry(void)
 
 	/* 3) run it (and print duration)*/
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
-		printk("calling %lli_%pF @ %i\n", entry->cookie, entry->func, task_pid_nr(current));
+		printk("calling %lli_%pF @ %i\n", (long long)entry->cookie,
+			entry->func, task_pid_nr(current));
 		calltime = ktime_get();
 	}
 	entry->func(entry->data, entry->cookie);
 	if (initcall_debug && system_state == SYSTEM_BOOTING) {
 		rettime = ktime_get();
 		delta = ktime_sub(rettime, calltime);
-		printk("initcall %lli_%pF returned 0 after %lld usecs\n", entry->cookie,
-			entry->func, ktime_to_ns(delta) >> 10);
+		printk("initcall %lli_%pF returned 0 after %lld usecs\n",
+			(long long)entry->cookie,
+			entry->func,
+			(long long)ktime_to_ns(delta) >> 10);
 	}
 
 	/* 4) remove it from the running queue */
@@ -247,7 +250,8 @@ void async_synchronize_cookie_special(async_cookie_t cookie, struct list_head *r
 		delta = ktime_sub(endtime, starttime);
 
 		printk("async_continuing @ %i after %lli usec\n",
-			task_pid_nr(current), ktime_to_ns(delta) >> 10);
+			task_pid_nr(current),
+			(long long)ktime_to_ns(delta) >> 10);
 	}
 }
 EXPORT_SYMBOL_GPL(async_synchronize_cookie_special);
diff --git a/kernel/capability.c b/kernel/capability.c
index 688926e496be..4e17041963f5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -161,7 +161,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
+SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
 {
 	int ret = 0;
 	pid_t pid;
@@ -235,7 +235,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
+SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
 {
 	struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 	unsigned i, tocopy;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c29831076e7a..5a54ff42874e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1115,8 +1115,10 @@ static void cgroup_kill_sb(struct super_block *sb) {
 	}
 	write_unlock(&css_set_lock);
 
-	list_del(&root->root_list);
-	root_count--;
+	if (!list_empty(&root->root_list)) {
+		list_del(&root->root_list);
+		root_count--;
+	}
 
 	mutex_unlock(&cgroup_mutex);
 
@@ -2434,7 +2436,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
  err_remove:
 
+	cgroup_lock_hierarchy(root);
 	list_del(&cgrp->sibling);
+	cgroup_unlock_hierarchy(root);
 	root->number_of_cgroups--;
 
  err_destroy:
@@ -2507,7 +2511,7 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
 	for_each_subsys(cgrp->root, ss) {
 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
 		int refcnt;
-		do {
+		while (1) {
 			/* We can only remove a CSS with a refcnt==1 */
 			refcnt = atomic_read(&css->refcnt);
 			if (refcnt > 1) {
@@ -2521,7 +2525,10 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
 			 * css_tryget() to spin until we set the
 			 * CSS_REMOVED bits or abort
 			 */
-		} while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt);
+			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
+				break;
+			cpu_relax();
+		}
 	}
  done:
 	for_each_subsys(cgrp->root, ss) {
@@ -2991,20 +2998,21 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
-	task_lock(tsk);
-	cg = tsk->cgroups;
-	parent = task_cgroup(tsk, subsys->subsys_id);
 
 	/* Pin the hierarchy */
-	if (!atomic_inc_not_zero(&parent->root->sb->s_active)) {
+	if (!atomic_inc_not_zero(&root->sb->s_active)) {
 		/* We race with the final deactivate_super() */
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
 
 	/* Keep the cgroup alive */
+	task_lock(tsk);
+	parent = task_cgroup(tsk, subsys->subsys_id);
+	cg = tsk->cgroups;
 	get_css_set(cg);
 	task_unlock(tsk);
+
 	mutex_unlock(&cgroup_mutex);
 
 	/* Now do the VFS work to create a cgroup */
@@ -3043,7 +3051,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 		mutex_unlock(&inode->i_mutex);
 		put_css_set(cg);
 
-		deactivate_super(parent->root->sb);
+		deactivate_super(root->sb);
 		/* The cgroup is still accessible in the VFS, but
 		 * we're not going to try to rmdir() it at this
 		 * point. */
@@ -3069,7 +3077,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 	mutex_lock(&cgroup_mutex);
 	put_css_set(cg);
 	mutex_unlock(&cgroup_mutex);
-	deactivate_super(parent->root->sb);
+	deactivate_super(root->sb);
 	return ret;
 }
 
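The do/while in cgroup_clear_css_refs() becomes an explicit retry loop above so a cpu_relax() can sit on the failure path. The general idiom (an illustrative sketch, not code from this commit): atomic_cmpxchg(v, old, new) returns the value previously in *v, and the swap took effect only if that return value equals old, so the caller re-reads and retries until it wins:

	static int inc_not_zero_sketch(atomic_t *v)
	{
		int old;

		while (1) {
			old = atomic_read(v);
			if (old == 0)
				return 0;	/* already zero: refuse */
			if (atomic_cmpxchg(v, old, old + 1) == old)
				return 1;	/* our update won the race */
			cpu_relax();		/* lost the race; spin politely */
		}
	}
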
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 647c77a88fcb..f76db9dcaa05 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -61,6 +61,14 @@
 #include <linux/cgroup.h>
 
 /*
+ * Workqueue for cpuset related tasks.
+ *
+ * Using kevent workqueue may cause deadlock when memory_migrate
+ * is set. So we create a separate workqueue thread for cpuset.
+ */
+static struct workqueue_struct *cpuset_wq;
+
+/*
  * Tracks how many cpusets are currently defined in system.
  * When there is only one cpuset (the root cpuset) we can
  * short circuit some hooks.
@@ -568,7 +576,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * load balancing domains (sched domains) as specified by that partial
  * partition.
  *
- * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
  * for a background explanation of this.
  *
  * Does not return errors, on the theory that the callers of this
@@ -831,7 +839,7 @@ static DECLARE_WORK(rebuild_sched_domains_work, do_rebuild_sched_domains);
  */
 static void async_rebuild_sched_domains(void)
 {
-	schedule_work(&rebuild_sched_domains_work);
+	queue_work(cpuset_wq, &rebuild_sched_domains_work);
 }
 
 /*
@@ -2111,6 +2119,9 @@ void __init cpuset_init_smp(void)
 
 	hotcpu_notifier(cpuset_track_online_cpus, 0);
 	hotplug_memory_notifier(cpuset_track_online_nodes, 10);
+
+	cpuset_wq = create_singlethread_workqueue("cpuset");
+	BUG_ON(!cpuset_wq);
 }
 
 /**
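The pattern adopted above — a private single-threaded workqueue instead of schedule_work() on the shared kevent queue — avoids the deadlock described in the new comment, since cpuset work no longer waits behind (or is waited on by) unrelated kevent items. A minimal sketch of the pattern, with illustrative names:

	static struct workqueue_struct *my_wq;

	static void my_work_fn(struct work_struct *work)
	{
		/* runs in the dedicated "my_wq" kernel thread */
	}
	static DECLARE_WORK(my_work, my_work_fn);

	static int __init my_init(void)
	{
		my_wq = create_singlethread_workqueue("my_wq");
		if (!my_wq)
			return -ENOMEM;
		queue_work(my_wq, &my_work);
		return 0;
	}
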
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 038707404b76..962a3b574f21 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @size: size of requested memory area
  * @dma_handle: This will be filled with the correct dma handle
  * @ret: This pointer will be filled with the virtual address
- *	    to allocated area.
+ *	     to allocated area.
  *
  * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
@@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	mem = dev->dma_mem;
 	if (!mem)
 		return 0;
-	if (unlikely(size > mem->size))
-		return 0;
+
+	*ret = NULL;
+
+	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-	if (pageno >= 0) {
-		/*
-		 * Memory was found in the per-device arena.
-		 */
-		*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-		*ret = mem->virt_base + (pageno << PAGE_SHIFT);
-		memset(*ret, 0, size);
-	} else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
-		/*
-		 * The per-device arena is exhausted and we are not
-		 * permitted to fall back to generic memory.
-		 */
-		*ret = NULL;
-	} else {
-		/*
-		 * The per-device arena is exhausted and we are
-		 * permitted to fall back to generic memory.
-		 */
-		return 0;
-	}
+	if (unlikely(pageno < 0))
+		goto err;
+
+	/*
+	 * Memory was found in the per-device area.
+	 */
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	memset(*ret, 0, size);
+
 	return 1;
+
+err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
+	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
 EXPORT_SYMBOL(dma_alloc_from_coherent);
 
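The key fix in the second hunk above is a units bug: mem->size counts pages while the size argument is in bytes, so the old bounds check compared bytes against pages. Worked numbers (illustrative, assuming 4 KiB pages):

	size_t pool_pages = 16;				/* mem->size */
	size_t pool_bytes = pool_pages << PAGE_SHIFT;	/* 65536 bytes */
	size_t request    = 8192;			/* 2 pages, in bytes */

	/* old: request > pool_pages  ->  8192 > 16,    spuriously "too big" */
	/* new: request > pool_bytes  ->  8192 > 65536, correctly allowed    */
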
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 0511716e9424..667c841c2952 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -209,8 +209,7 @@ static int __init proc_execdomains_init(void)
 module_init(proc_execdomains_init);
 #endif
 
-asmlinkage long
-sys_personality(u_long personality)
+SYSCALL_DEFINE1(personality, u_long, personality)
 {
 	u_long old = current->personality;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index c7740fa3252c..f80dec3f1875 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1141,7 +1141,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 
 EXPORT_SYMBOL(complete_and_exit);
 
-asmlinkage long sys_exit(int error_code)
+SYSCALL_DEFINE1(exit, int, error_code)
 {
 	do_exit((error_code&0xff)<<8);
 }
@@ -1182,9 +1182,11 @@ do_group_exit(int exit_code)
  * wait4()-ing process will get the correct exit code - even if this
  * thread is not the thread group leader.
  */
-asmlinkage void sys_exit_group(int error_code)
+SYSCALL_DEFINE1(exit_group, int, error_code)
 {
 	do_group_exit((error_code & 0xff) << 8);
+	/* NOTREACHED */
+	return 0;
 }
 
 static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
@@ -1752,9 +1754,8 @@ end:
 	return retval;
 }
 
-asmlinkage long sys_waitid(int which, pid_t upid,
-			   struct siginfo __user *infop, int options,
-			   struct rusage __user *ru)
+SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+		infop, int, options, struct rusage __user *, ru)
 {
 	struct pid *pid = NULL;
 	enum pid_type type;
@@ -1793,8 +1794,8 @@ asmlinkage long sys_waitid(int which, pid_t upid,
 	return ret;
 }
 
-asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
-			  int options, struct rusage __user *ru)
+SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+		int, options, struct rusage __user *, ru)
 {
 	struct pid *pid = NULL;
 	enum pid_type type;
@@ -1831,7 +1832,7 @@ asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
  * sys_waitpid() remains for compatibility. waitpid() should be
  * implemented by calling sys_wait4() from libc.a.
  */
-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
+SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
 {
 	return sys_wait4(pid, stat_addr, options, NULL);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 1d68f1255dd8..6d5dbb7a13e2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
-	int ret;
 
 	if (clone_flags & CLONE_THREAD) {
-		ret = thread_group_cputime_clone_thread(current);
-		if (likely(!ret)) {
-			atomic_inc(&current->signal->count);
-			atomic_inc(&current->signal->live);
-		}
-		return ret;
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
+		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+	if (sig)
+		posix_cpu_timers_init_group(sig);
+
 	tsk->signal = sig;
 	if (!sig)
 		return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
 
-	posix_cpu_timers_init_group(sig);
-
 	acct_init_pacct(&sig->pacct);
 
 	tty_audit_fork(sig);
@@ -901,7 +899,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	clear_freeze_flag(p);
 }
 
-asmlinkage long sys_set_tid_address(int __user *tidptr)
+SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
 	current->clear_child_tid = tidptr;
 
@@ -1007,6 +1005,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * triggers too late. This doesn't hurt, the check is only there
 	 * to stop root fork bombs.
 	 */
+	retval = -EAGAIN;
 	if (nr_threads >= max_threads)
 		goto bad_fork_cleanup_count;
 
@@ -1603,7 +1602,7 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
  * constructed. Here we are modifying the current, active,
  * task_struct.
  */
-asmlinkage long sys_unshare(unsigned long unshare_flags)
+SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 {
 	int err = 0;
 	struct fs_struct *fs, *new_fs = NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index 002aa189eb09..f89d373a9c6d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1733,9 +1733,8 @@ pi_faulted:
  * @head: pointer to the list-head
  * @len: length of the list-head, as userspace expects
  */
-asmlinkage long
-sys_set_robust_list(struct robust_list_head __user *head,
-		    size_t len)
+SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
+		size_t, len)
 {
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
@@ -1756,9 +1755,9 @@ sys_set_robust_list(struct robust_list_head __user *head,
  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
  * @len_ptr: pointer to a length field, the kernel fills in the header size
  */
-asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
-		    size_t __user *len_ptr)
+SYSCALL_DEFINE3(get_robust_list, int, pid,
+		struct robust_list_head __user * __user *, head_ptr,
+		size_t __user *, len_ptr)
 {
 	struct robust_list_head __user *head;
 	unsigned long ret;
@@ -1978,9 +1977,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 }
 
 
-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
-			  struct timespec __user *utime, u32 __user *uaddr2,
-			  u32 val3)
+SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+		struct timespec __user *, utime, u32 __user *, uaddr2,
+		u32, val3)
 {
 	struct timespec ts;
 	ktime_t t, *tp = NULL;
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1455b7651b6b..f394d2a42ca3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,13 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 			continue;
 		timer = rb_entry(base->first, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		/*
+		 * clock_was_set() has changed base->offset so the
+		 * result might be negative. Fix it up to prevent a
+		 * false positive in clockevents_program_event()
+		 */
+		if (expires.tv64 < 0)
+			expires.tv64 = 0;
 		if (expires.tv64 < cpu_base->expires_next.tv64)
 			cpu_base->expires_next = expires;
 	}
@@ -614,7 +621,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-	/* Retrigger the CPU local events: */
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
 	retrigger_next_event(NULL);
 }
 
@@ -1156,6 +1165,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 iteration's attempts, we consider that hrtimer_interrupt()
+ * is hanging, which could happen with something that slows the interrupt
+ * such as the tracing. Then we force the clock reprogramming for each future
+ * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
+ * threshold that we will overwrite.
+ * The next tick event will be scheduled to 3 times we currently spend on
+ * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
+ * 1/4 of their time to process the hrtimer interrupts. This is enough to
+ * let it running without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+			ktime_t try_time)
+{
+	force_clock_reprogram = 1;
+	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+	printk(KERN_WARNING "hrtimer: interrupt too slow, "
+		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1165,6 +1197,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
+	int nr_retries = 0;
 	int i;
 
 	BUG_ON(!cpu_base->hres_active);
@@ -1172,6 +1205,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 
  retry:
+	/* 5 retries is enough to notice a hang */
+	if (!(++nr_retries % 5))
+		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
 	now = ktime_get();
 
 	expires_next.tv64 = KTIME_MAX;
@@ -1224,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 != KTIME_MAX) {
-		if (tick_program_event(expires_next, 0))
+		if (tick_program_event(expires_next, force_clock_reprogram))
 			goto retry;
 	}
 }
@@ -1467,8 +1504,8 @@ out:
 	return ret;
 }
 
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec tu;
 
@@ -1578,6 +1615,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	{
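A note on the 3x factor in hrtimer_interrupt_hanging() above: if one pass through hrtimer_interrupt() takes time t, forcing min_delta_ns to 3*t pushes the next event at least 3t into the future, so in the worst case the cpu spends

	t / (t + 3*t) = 1/4

of its time in the interrupt handler — the 1/4 bound quoted in the new comment.
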
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index f63c706d25e1..7de11bd64dfe 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -383,6 +383,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 out_unlock:
 	spin_unlock(&desc->lock);
 }
+EXPORT_SYMBOL_GPL(handle_level_irq);
 
 /**
  * handle_fasteoi_irq - irq handler for transparent controllers
@@ -593,6 +594,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_irq_handler);
 
 void
 set_irq_chip_and_handler(unsigned int irq, struct irq_chip *chip,
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be9173..3aba8d12f328 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -39,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 	ack_bad_irq(irq);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+	alloc_bootmem_cpumask_var(&irq_default_affinity);
+	cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
 /*
  * Linux has a controller-independent interrupt architecture.
  * Every controller has a 'controller-template', that is used
@@ -134,6 +146,8 @@ int __init early_irq_init(void)
 	int legacy_count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
@@ -219,6 +233,8 @@ int __init early_irq_init(void)
 	int count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
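Context for the two files above (a plausible reading, not stated in the diff itself): early_irq_init() runs long before the slab allocator is available, so the irq_default_affinity mask has to come from bootmem rather than the GFP_KERNEL allocation the deleted core_initcall() in kernel/irq/manage.c used. For comparison, typical cpumask_var_t usage once the normal allocators are up:

	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_setall(mask);
	/* ... use mask ... */
	free_cpumask_var(mask);
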
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb345..291f03664552 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -15,17 +15,9 @@
 
 #include "internals.h"
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 cpumask_var_t irq_default_affinity;
 
-static int init_irq_default_affinity(void)
-{
-	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
-	cpumask_setall(irq_default_affinity);
-	return 0;
-}
-core_initcall(init_irq_default_affinity);
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index ecf765c6a77a..acd88356ac76 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -71,7 +71,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	desc = irq_desc_ptrs[irq];
 
 	if (desc && old_desc != desc)
-			goto out_unlock;
+		goto out_unlock;
 
 	node = cpu_to_node(cpu);
 	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
@@ -84,10 +84,15 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	init_copy_one_irq_desc(irq, old_desc, desc, cpu);
 
 	irq_desc_ptrs[irq] = desc;
+	spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	/* free the old one */
 	free_one_irq_desc(old_desc, desc);
+	spin_unlock(&old_desc->lock);
 	kfree(old_desc);
+	spin_lock(&desc->lock);
+
+	return desc;
 
 out_unlock:
 	spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --git a/kernel/itimer.c b/kernel/itimer.c
index db7c358b9a02..6a5fe93dd8bd 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -100,7 +100,7 @@ int do_getitimer(int which, struct itimerval *value)
 	return 0;
 }
 
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
+SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
 {
 	int error = -EFAULT;
 	struct itimerval get_buffer;
@@ -260,9 +260,8 @@ unsigned int alarm_setitimer(unsigned int seconds)
 	return it_old.it_value.tv_sec;
 }
 
-asmlinkage long sys_setitimer(int which,
-			      struct itimerval __user *value,
-			      struct itimerval __user *ovalue)
+SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
+		struct itimerval __user *, ovalue)
 {
 	struct itimerval set_buffer, get_buffer;
 	int error;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index e694afa0eb8c..7b8b0f21a5b1 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,19 +30,20 @@
 #define all_var 0
 #endif
 
-extern const unsigned long kallsyms_addresses[];
-extern const u8 kallsyms_names[];
+/* These will be re-linked against their real values during the second link stage */
+extern const unsigned long kallsyms_addresses[] __attribute__((weak));
+extern const u8 kallsyms_names[] __attribute__((weak));
 
 /* tell the compiler that the count isn't in the small data section if the arch
  * has one (eg: FRV)
  */
 extern const unsigned long kallsyms_num_syms
-		__attribute__((__section__(".rodata")));
+__attribute__((weak, section(".rodata")));
 
-extern const u8 kallsyms_token_table[];
-extern const u16 kallsyms_token_index[];
+extern const u8 kallsyms_token_table[] __attribute__((weak));
+extern const u16 kallsyms_token_index[] __attribute__((weak));
 
-extern const unsigned long kallsyms_markers[];
+extern const unsigned long kallsyms_markers[] __attribute__((weak));
 
 static inline int is_kernel_inittext(unsigned long addr)
 {
@@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr,
 	unsigned long symbol_start = 0, symbol_end = 0;
 	unsigned long i, low, high, mid;
 
+	/* This kernel should never had been booted. */
+	BUG_ON(!kallsyms_addresses);
+
 	/* do a binary search on the sorted kallsyms_addresses array */
 	low = 0;
 	high = kallsyms_num_syms;
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 3fb855ad6aa0..8a6d7b08864e 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -934,9 +934,8 @@ struct kimage *kexec_crash_image;
 
 static DEFINE_MUTEX(kexec_mutex);
 
-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
-			       struct kexec_segment __user *segments,
-			       unsigned long flags)
+SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+		struct kexec_segment __user *, segments, unsigned long, flags)
 {
 	struct kimage **dest_image, *image;
 	int result;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b9cbdc0127a..7ba8cd9845cb 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -123,7 +123,7 @@ static int collect_garbage_slots(void);
 static int __kprobes check_safety(void)
 {
 	int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
 	ret = freeze_processes();
 	if (ret == 0) {
 		struct task_struct *p, *q;
diff --git a/kernel/module.c b/kernel/module.c
index c9332c90d5a0..ba22484a987e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -573,13 +573,13 @@ static char last_unloaded_module[MODULE_NAME_LEN+1];
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
-	unsigned int i;
+	int cpu;
 
 	INIT_LIST_HEAD(&mod->modules_which_use_me);
-	for (i = 0; i < NR_CPUS; i++)
-		local_set(&mod->ref[i].count, 0);
+	for_each_possible_cpu(cpu)
+		local_set(__module_ref_addr(mod, cpu), 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
+	local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
@@ -717,10 +717,11 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 
 unsigned int module_refcount(struct module *mod)
 {
-	unsigned int i, total = 0;
+	unsigned int total = 0;
+	int cpu;
 
-	for (i = 0; i < NR_CPUS; i++)
-		total += local_read(&mod->ref[i].count);
+	for_each_possible_cpu(cpu)
+		total += local_read(__module_ref_addr(mod, cpu));
 	return total;
 }
 EXPORT_SYMBOL(module_refcount);
@@ -743,8 +744,8 @@ static void wait_for_zero_refcount(struct module *mod)
 	mutex_lock(&module_mutex);
 }
 
-asmlinkage long
-sys_delete_module(const char __user *name_user, unsigned int flags)
+SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+		unsigned int, flags)
 {
 	struct module *mod;
 	char name[MODULE_NAME_LEN];
@@ -894,7 +895,7 @@ void module_put(struct module *module)
 {
 	if (module) {
 		unsigned int cpu = get_cpu();
-		local_dec(&module->ref[cpu].count);
+		local_dec(__module_ref_addr(module, cpu));
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
@@ -1464,7 +1465,10 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	if (mod->percpu)
 		percpu_modfree(mod->percpu);
-
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	if (mod->refptr)
+		percpu_modfree(mod->refptr);
+#endif
 	/* Free lock-classes: */
 	lockdep_free_key_range(mod->module_core, mod->core_size);
 
@@ -2011,6 +2015,14 @@ static noinline struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto free_mod;
 
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
+				      mod->name);
+	if (!mod->refptr) {
+		err = -ENOMEM;
+		goto free_mod;
+	}
+#endif
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2018,7 +2030,7 @@ static noinline struct module *load_module(void __user *umod,
 					 mod->name);
 		if (!percpu) {
 			err = -ENOMEM;
-			goto free_mod;
+			goto free_percpu;
 		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 		mod->percpu = percpu;
@@ -2282,6 +2294,9 @@ static noinline struct module *load_module(void __user *umod,
  free_percpu:
 	if (percpu)
 		percpu_modfree(percpu);
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod->refptr);
+#endif
  free_mod:
 	kfree(args);
  free_hdr:
@@ -2296,10 +2311,8 @@ static noinline struct module *load_module(void __user *umod,
 }
 
 /* This is where the real work happens */
-asmlinkage long
-sys_init_module(void __user *umod,
-		unsigned long len,
-		const char __user *uargs)
+SYSCALL_DEFINE3(init_module, void __user *, umod,
+		unsigned long, len, const char __user *, uargs)
 {
 	struct module *mod;
 	int ret = 0;
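The module.c hunks above move the per-module refcount from a fixed NR_CPUS-sized array to per-CPU storage reached through __module_ref_addr(). An illustrative sketch of the same scheme (not the module.c code itself — it uses alloc_percpu() where module.c uses percpu_modalloc()): each CPU owns a local_t slot, so increments are contention-free, and the true count is the sum over all possible CPUs:

	#include <linux/percpu.h>
	#include <asm/local.h>

	static local_t *refs;	/* one local_t per CPU */

	static int ref_init(void)
	{
		int cpu;

		refs = alloc_percpu(local_t);
		if (!refs)
			return -ENOMEM;
		for_each_possible_cpu(cpu)
			local_set(per_cpu_ptr(refs, cpu), 0);
		return 0;
	}

	static void ref_get(void)
	{
		/* get_cpu() disables preemption while we touch our slot */
		local_inc(per_cpu_ptr(refs, get_cpu()));
		put_cpu();
	}

	static unsigned int ref_total(void)
	{
		unsigned int total = 0;
		int cpu;

		for_each_possible_cpu(cpu)
			total += local_read(per_cpu_ptr(refs, cpu));
		return total;
	}
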
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a47832..fa07da94d7be 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,76 +10,6 @@
 #include <linux/kernel_stat.h>
 
 /*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields. Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group. Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct task_cputime *cputime;
-
-	/*
-	 * If we have multiple threads and we don't already have a
-	 * per-CPU task_cputime struct (checked in the caller), allocate
-	 * one and fill it in with the times accumulated so far. We may
-	 * race with another thread so recheck after we pick up the sighand
-	 * lock.
-	 */
-	cputime = alloc_percpu(struct task_cputime);
-	if (cputime == NULL)
-		return -ENOMEM;
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (sig->cputime.totals) {
-		spin_unlock_irq(&tsk->sighand->siglock);
-		free_percpu(cputime);
-		return 0;
-	}
-	sig->cputime.totals = cputime;
-	cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
-	cputime->utime = tsk->utime;
-	cputime->stime = tsk->stime;
-	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
-	spin_unlock_irq(&tsk->sighand->siglock);
-	return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk: The task we use to identify the thread group.
- * @times: task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
-	struct task_struct *tsk,
-	struct task_cputime *times)
-{
-	struct task_cputime *totals, *tot;
-	int i;
-
-	totals = tsk->signal->cputime.totals;
-	if (!totals) {
-		times->utime = tsk->utime;
-		times->stime = tsk->stime;
-		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
-		return;
-	}
-
-	times->stime = times->utime = cputime_zero;
-	times->sum_exec_runtime = 0;
-	for_each_possible_cpu(i) {
-		tot = per_cpu_ptr(totals, i);
-		times->utime = cputime_add(times->utime, tot->utime);
-		times->stime = cputime_add(times->stime, tot->stime);
-		times->sum_exec_runtime += tot->sum_exec_runtime;
-	}
-}
-
-/*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 887c63787de6..052ec4d195c7 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -477,10 +477,9 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 
 /* Create a POSIX.1b interval timer. */
 
-asmlinkage long
-sys_timer_create(const clockid_t which_clock,
-		 struct sigevent __user *timer_event_spec,
-		 timer_t __user * created_timer_id)
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+		struct sigevent __user *, timer_event_spec,
+		timer_t __user *, created_timer_id)
 {
 	struct k_itimer *new_timer;
 	int error, new_timer_id;
@@ -661,8 +660,8 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 }
 
 /* Get the time remaining on a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+		struct itimerspec __user *, setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec cur_setting;
@@ -691,8 +690,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
  * the call back to do_schedule_next_timer(). So all we need to do is
  * to pick up the frozen overrun.
  */
-asmlinkage long
-sys_timer_getoverrun(timer_t timer_id)
+SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 {
 	struct k_itimer *timr;
 	int overrun;
@@ -760,10 +758,9 @@ common_timer_set(struct k_itimer *timr, int flags,
 }
 
 /* Set a POSIX.1b interval timer */
-asmlinkage long
-sys_timer_settime(timer_t timer_id, int flags,
-		  const struct itimerspec __user *new_setting,
-		  struct itimerspec __user *old_setting)
+SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+		const struct itimerspec __user *, new_setting,
+		struct itimerspec __user *, old_setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec new_spec, old_spec;
@@ -816,8 +813,7 @@ static inline int timer_delete_hook(struct k_itimer *timer)
 }
 
 /* Delete a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_delete(timer_t timer_id)
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
 	struct k_itimer *timer;
 	unsigned long flags;
@@ -903,8 +899,8 @@ int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
 }
 EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
 
-asmlinkage long sys_clock_settime(const clockid_t which_clock,
-				  const struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+		const struct timespec __user *, tp)
 {
 	struct timespec new_tp;
 
@@ -916,8 +912,8 @@ asmlinkage long sys_clock_settime(const clockid_t which_clock,
 	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
 }
 
-asmlinkage long
-sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+		struct timespec __user *,tp)
 {
 	struct timespec kernel_tp;
 	int error;
@@ -933,8 +929,8 @@ sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
 
 }
 
-asmlinkage long
-sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+		struct timespec __user *, tp)
 {
 	struct timespec rtn_tp;
 	int error;
@@ -963,10 +959,9 @@ static int common_nsleep(const clockid_t which_clock, int flags,
 					 which_clock);
 }
 
-asmlinkage long
-sys_clock_nanosleep(const clockid_t which_clock, int flags,
-		    const struct timespec __user *rqtp,
-		    struct timespec __user *rmtp)
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+		const struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec t;
 
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 597823b5b700..d7a10167a25b 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,8 @@ EXTRA_CFLAGS += -DDEBUG
 endif
 
 obj-y				:= main.o
-obj-$(CONFIG_PM_SLEEP)		+= process.o console.o
+obj-$(CONFIG_PM_SLEEP)		+= console.o
+obj-$(CONFIG_FREEZER)		+= process.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o disk.o snapshot.o swap.o user.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 45e8541ab7e3..432ee575c9ee 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,6 +71,14 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
 	mutex_unlock(&pm_mutex);
 }
 
+static bool entering_platform_hibernation;
+
+bool system_entering_hibernation(void)
+{
+	return entering_platform_hibernation;
+}
+EXPORT_SYMBOL(system_entering_hibernation);
+
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
@@ -411,6 +419,7 @@ int hibernation_platform_enter(void)
 	if (error)
 		goto Close;
 
+	entering_platform_hibernation = true;
 	suspend_console();
 	error = device_suspend(PMSG_HIBERNATE);
 	if (error) {
@@ -445,6 +454,7 @@ int hibernation_platform_enter(void)
  Finish:
 	hibernation_ops->finish();
  Resume_devices:
+	entering_platform_hibernation = false;
 	device_resume(PMSG_RESTORE);
 	resume_console();
  Close:
diff --git a/kernel/printk.c b/kernel/printk.c
index 7015733793e8..69188f226a93 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -382,7 +382,7 @@ out:
 	return error;
 }
 
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
+SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
 {
 	return do_syslog(type, buf, len);
 }
@@ -742,11 +742,6 @@ EXPORT_SYMBOL(vprintk);
 
 #else
 
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
-{
-	return -ENOSYS;
-}
-
 static void call_console_drivers(unsigned start, unsigned end)
 {
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 29dc700e198c..c9cf48b21f05 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -574,7 +574,7 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
 #define arch_ptrace_attach(child)	do { } while (0)
 #endif
 
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
 {
 	struct task_struct *child;
 	long ret;
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 490934fc7ac3..bd5a9003497c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user)
 	raise_rcu_softirq();
 }
 
-static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 						struct rcu_data *rdp)
 {
 	unsigned long flags;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f2d8638e6c60..b2fd602a6f6f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu)
  * access due to the fact that this CPU cannot possibly have any RCU
  * callbacks in flight yet.
  */
-static void
+static void __cpuinit
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
diff --git a/kernel/relay.c b/kernel/relay.c
index 09ac2008f77b..9d79b7854fa6 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan,
 
 	mutex_lock(&relay_channels_mutex);
 	/* Is chan already set up? */
-	if (unlikely(chan->has_base_filename))
+	if (unlikely(chan->has_base_filename)) {
+		mutex_unlock(&relay_channels_mutex);
 		return -EEXIST;
+	}
 	chan->has_base_filename = 1;
 	chan->parent = parent;
 	curr_cpu = get_cpu();
diff --git a/kernel/resource.c b/kernel/resource.c
index ca6a1536b205..fd5d7d574bb9 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -620,6 +620,7 @@ resource_size_t resource_alignment(struct resource *res)
  * @start: resource start address
  * @n: resource region size
  * @name: reserving caller's ID string
+ * @flags: IO resource flags
  */
 struct resource * __request_region(struct resource *parent,
 				   resource_size_t start, resource_size_t n,
diff --git a/kernel/sched.c b/kernel/sched.c
index 589e7308c615..186c6fd08acf 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
1323 * slice expiry etc. 1323 * slice expiry etc.
1324 */ 1324 */
1325 1325
1326#define WEIGHT_IDLEPRIO 2 1326#define WEIGHT_IDLEPRIO 3
1327#define WMULT_IDLEPRIO (1 << 31) 1327#define WMULT_IDLEPRIO 1431655765
1328 1328
1329/* 1329/*
1330 * Nice levels are multiplicative, with a gentle 10% change for every 1330 * Nice levels are multiplicative, with a gentle 10% change for every
@@ -2266,6 +2266,16 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	if (!sched_feat(SYNC_WAKEUPS))
 		sync = 0;
 
+	if (!sync) {
+		if (current->se.avg_overlap < sysctl_sched_migration_cost &&
+		    p->se.avg_overlap < sysctl_sched_migration_cost)
+			sync = 1;
+	} else {
+		if (current->se.avg_overlap >= sysctl_sched_migration_cost ||
+		    p->se.avg_overlap >= sysctl_sched_migration_cost)
+			sync = 0;
+	}
+
 #ifdef CONFIG_SMP
 	if (sched_feat(LB_WAKEUP_UPDATE)) {
 		struct sched_domain *sd;
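The block added to try_to_wake_up() second-guesses the caller's sync hint using measured behaviour: se.avg_overlap tracks how long waker and wakee tend to stay runnable together. A standalone sketch of the decision (not kernel code; sysctl_sched_migration_cost defaults to 0.5 ms):

	typedef unsigned long long u64;

	/* Short observed overlaps mean the waker usually blocks right after
	 * waking the wakee, so the wakeup is effectively synchronous even
	 * when the caller did not say so; long overlaps mean the opposite,
	 * so a stated sync hint is dropped. */
	static int adjust_sync_hint(int sync, u64 waker_overlap,
				    u64 wakee_overlap, u64 migration_cost)
	{
		if (!sync && waker_overlap < migration_cost &&
			     wakee_overlap < migration_cost)
			return 1;
		if (sync && (waker_overlap >= migration_cost ||
			     wakee_overlap >= migration_cost))
			return 0;
		return sync;
	}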
@@ -4440,7 +4450,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
@@ -4752,8 +4762,8 @@ EXPORT_SYMBOL(default_wake_function);
  * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  * zero in this (rare) case, and we handle it by continuing to scan the queue.
  */
-static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
+void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
 			int nr_exclusive, int sync, void *key)
 {
 	wait_queue_t *curr, *next;
 
@@ -5191,7 +5201,7 @@ int can_nice(const struct task_struct *p, const int nice)
  * sys_setpriority is a more generic, but much slower function that
  * does similar things.
  */
-asmlinkage long sys_nice(int increment)
+SYSCALL_DEFINE1(nice, int, increment)
 {
 	long nice, retval;
 
@@ -5498,8 +5508,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long
-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+		struct sched_param __user *, param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -5513,7 +5523,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
 	return do_sched_setscheduler(pid, -1, param);
 }
@@ -5522,7 +5532,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
  */
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
 	struct task_struct *p;
 	int retval;
@@ -5547,7 +5557,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
  */
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
@@ -5665,8 +5675,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
  */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	cpumask_var_t new_mask;
 	int retval;
@@ -5713,8 +5723,8 @@ out_unlock:
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
  */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	int ret;
 	cpumask_var_t mask;
@@ -5743,7 +5753,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  * This function yields the current CPU to other tasks. If there are no
 * other threads running on this CPU then this function will return.
 */
-asmlinkage long sys_sched_yield(void)
+SYSCALL_DEFINE0(sched_yield)
 {
 	struct rq *rq = this_rq_lock();
 
@@ -5884,7 +5894,7 @@ long __sched io_schedule_timeout(long timeout)
  * this syscall returns the maximum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_max(int policy)
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5909,7 +5919,7 @@ asmlinkage long sys_sched_get_priority_max(int policy)
  * this syscall returns the minimum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_min(int policy)
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5934,8 +5944,8 @@ asmlinkage long sys_sched_get_priority_min(int policy)
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
  */
-asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+		struct timespec __user *, interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
@@ -9115,6 +9125,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
+#ifdef CONFIG_USER_SCHED
+	if (tg == &root_task_group) {
+		period = global_rt_period();
+		runtime = global_rt_runtime();
+	}
+#endif
+
 	/*
 	 * Cannot have more runtime than the period.
 	 */
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8e1352c75557..a7e50ba185ac 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 						   struct sched_entity,
 						   run_node);
 
-		if (vruntime == cfs_rq->min_vruntime)
+		if (!cfs_rq->curr)
 			vruntime = se->vruntime;
 		else
 			vruntime = min_vruntime(vruntime, se->vruntime);
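For context, update_min_vruntime() around this fix looks roughly like the following (reconstructed from kernels of this era, abridged; treat as orientation rather than the exact source). The old test compared against min_vruntime as an indirect way of asking whether the curr branch had run, which misfires whenever curr's vruntime happens to equal min_vruntime; testing cfs_rq->curr directly asks the intended question:

	static void update_min_vruntime(struct cfs_rq *cfs_rq)
	{
		u64 vruntime = cfs_rq->min_vruntime;

		if (cfs_rq->curr)
			vruntime = cfs_rq->curr->vruntime;

		if (cfs_rq->rb_leftmost) {
			struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
							   struct sched_entity,
							   run_node);

			if (!cfs_rq->curr)	/* the fixed test */
				vruntime = se->vruntime;
			else
				vruntime = min_vruntime(vruntime, se->vruntime);
		}

		/* never move min_vruntime backwards */
		cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
	}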
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load = &cfs_rq->load;
+		struct load_weight *load;
+
+		cfs_rq = cfs_rq_of(se);
+		load = &cfs_rq->load;
 
 		if (unlikely(!se->on_rq)) {
 			struct load_weight lw = cfs_rq->load;
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		unsigned long thresh = sysctl_sched_latency;
 
 		/*
-		 * convert the sleeper threshold into virtual time
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class. We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
 		 */
-		if (sched_feat(NORMALIZED_SLEEPER))
+		if (sched_feat(NORMALIZED_SLEEPER) &&
+				task_of(se)->policy != SCHED_IDLE)
 			thresh = calc_delta_fair(thresh, se);
 
 		vruntime -= thresh;
@@ -712,7 +719,7 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
 		__enqueue_entity(cfs_rq, se);
 }
 
-static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 	if (cfs_rq->last == se)
 		cfs_rq->last = NULL;
@@ -721,6 +728,12 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		cfs_rq->next = NULL;
 }
 
+static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
+{
+	for_each_sched_entity(se)
+		__clear_buddies(cfs_rq_of(se), se);
+}
+
 static void
 dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
 {
@@ -761,8 +774,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 
 	ideal_runtime = sched_slice(cfs_rq, curr);
 	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
-	if (delta_exec > ideal_runtime)
+	if (delta_exec > ideal_runtime) {
 		resched_task(rq_of(cfs_rq)->curr);
+		/*
+		 * The current task ran long enough, ensure it doesn't get
+		 * re-elected due to buddy favours.
+		 */
+		clear_buddies(cfs_rq, curr);
+	}
 }
 
 static void
@@ -1172,20 +1191,15 @@ wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
 		  int idx, unsigned long load, unsigned long this_load,
 		  unsigned int imbalance)
 {
-	struct task_struct *curr = this_rq->curr;
-	struct task_group *tg;
 	unsigned long tl = this_load;
 	unsigned long tl_per_task;
+	struct task_group *tg;
 	unsigned long weight;
 	int balanced;
 
 	if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
 		return 0;
 
-	if (sync && (curr->se.avg_overlap > sysctl_sched_migration_cost ||
-			p->se.avg_overlap > sysctl_sched_migration_cost))
-		sync = 0;
-
 	/*
 	 * If sync wakeup then subtract the (maximum possible)
 	 * effect of the currently running task from the load
@@ -1340,14 +1354,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->last = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->last = se;
+	}
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->next = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->next = se;
+	}
 }
 
 /*
@@ -1393,18 +1411,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 
 	/*
-	 * Batch tasks do not preempt (their preemption is driven by
+	 * Batch and idle tasks do not preempt (their preemption is driven by
 	 * the tick):
 	 */
-	if (unlikely(p->policy == SCHED_BATCH))
+	if (unlikely(p->policy != SCHED_NORMAL))
 		return;
 
+	/* Idle tasks are by definition preempted by everybody. */
+	if (unlikely(curr->policy == SCHED_IDLE)) {
+		resched_task(curr);
+		return;
+	}
+
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
-	if (sched_feat(WAKEUP_OVERLAP) && (sync ||
-			(se->avg_overlap < sysctl_sched_migration_cost &&
-			 pse->avg_overlap < sysctl_sched_migration_cost))) {
+	if (sched_feat(WAKEUP_OVERLAP) && sync) {
 		resched_task(curr);
 		return;
 	}
@@ -1435,6 +1457,11 @@ static struct task_struct *pick_next_task_fair(struct rq *rq)
 
 	do {
 		se = pick_next_entity(cfs_rq);
+		/*
+		 * If se was a buddy, clear it so that it will have to earn
+		 * the favour again.
+		 */
+		__clear_buddies(cfs_rq, se);
 		set_next_entity(cfs_rq, se);
 		cfs_rq = group_cfs_rq(se);
 	} while (cfs_rq);
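Terminology for the buddy changes above: cfs_rq->next marks the entity we would prefer to run next (typically a fresh wakee), cfs_rq->last the one that just ran. With group scheduling a task has one sched_entity per hierarchy level, so marks are set, and now cleared, level by level. Per level the pick prefers a buddy over the leftmost entity, roughly as follows (order and details approximated; __pick_next_entity() returns the leftmost entity):

	static struct sched_entity *pick_next_entity_sketch(struct cfs_rq *cfs_rq)
	{
		struct sched_entity *se = __pick_next_entity(cfs_rq);

		if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, se) < 1)
			return cfs_rq->next;	/* prefer the wakee buddy */
		if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, se) < 1)
			return cfs_rq->last;	/* then the previous runner */
		return se;
	}

Hence the two new clear sites: a task that exhausted its slice, or that was just picked, must earn the favour again instead of being re-elected through a stale mark.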
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 954e1a81b796..bac1061cea2f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -968,8 +968,8 @@ static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
 	if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
 		return this_cpu;
 
-	first = first_cpu(*mask);
-	if (first != NR_CPUS)
+	first = cpumask_first(mask);
+	if (first < nr_cpu_ids)
 		return first;
 
 	return -1;
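The sched_rt.c hunk is part of the cpumask API migration: first_cpu() reported an empty mask as NR_CPUS, a compile-time constant, while cpumask_first() reports exhaustion as any value >= nr_cpu_ids, the runtime count of possible CPUs. A minimal sketch of the new idiom (helper name hypothetical):

	/* returns a CPU from the mask, or -1 if the mask is empty */
	static int pick_any_cpu(const struct cpumask *mask)
	{
		int cpu = cpumask_first(mask);

		return (cpu < nr_cpu_ids) ? cpu : -1;
	}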
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index f2773b5d1226..8ab0cef8ecab 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
 static inline void account_group_user_time(struct task_struct *tsk,
 					   cputime_t cputime)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	/* tsk == current, ensure it is safe to use ->signal */
@@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
 		return;
 
 	sig = tsk->signal;
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->utime = cputime_add(times->utime, cputime);
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->utime = cputime_add(times->utime, cputime);
+	spin_unlock(&times->lock);
 }
 
 /**
@@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
 static inline void account_group_system_time(struct task_struct *tsk,
 					     cputime_t cputime)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	/* tsk == current, ensure it is safe to use ->signal */
@@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
 		return;
 
 	sig = tsk->signal;
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->stime = cputime_add(times->stime, cputime);
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->stime = cputime_add(times->stime, cputime);
+	spin_unlock(&times->lock);
 }
 
 /**
@@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
 static inline void account_group_exec_runtime(struct task_struct *tsk,
 					      unsigned long long ns)
 {
+	struct task_cputime *times;
 	struct signal_struct *sig;
 
 	sig = tsk->signal;
@@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	if (unlikely(!sig))
 		return;
 
-	if (sig->cputime.totals) {
-		struct task_cputime *times;
+	times = &sig->cputime.totals;
 
-		times = per_cpu_ptr(sig->cputime.totals, get_cpu());
-		times->sum_exec_runtime += ns;
-		put_cpu_no_resched();
-	}
+	spin_lock(&times->lock);
+	times->sum_exec_runtime += ns;
+	spin_unlock(&times->lock);
 }
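These three hunks assume a matching structural change elsewhere in the series: signal_struct's thread-group totals stop being a per-cpu allocation and become one shared, lock-protected struct. Roughly (field layout reconstructed, not quoted):

	/* sketch -- reconstructed shape of the structures these hunks use */
	struct task_cputime {
		cputime_t		utime;
		cputime_t		stime;
		unsigned long long	sum_exec_runtime;
		spinlock_t		lock;	/* new: serializes updates */
	};

	struct thread_group_cputime {
		struct task_cputime	totals;	/* was: a per-cpu pointer */
	};

The accounting paths give up per-cpu locality but no longer need the get_cpu()/put_cpu_no_resched() pinning, and a NULL totals pointer can no longer make the kernel silently skip group accounting.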
diff --git a/kernel/signal.c b/kernel/signal.c
index 3152ac3b62e2..b6b36768b758 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -909,7 +909,9 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
 	}
 #endif
 	printk("\n");
+	preempt_disable();
 	show_regs(regs);
+	preempt_enable();
 }
 
 static int __init setup_print_fatal_signals(char *str)
@@ -1961,7 +1963,7 @@ EXPORT_SYMBOL(unblock_all_signals);
  * System call entry points.
  */
 
-asmlinkage long sys_restart_syscall(void)
+SYSCALL_DEFINE0(restart_syscall)
 {
 	struct restart_block *restart = &current_thread_info()->restart_block;
 	return restart->fn(restart);
@@ -2014,8 +2016,8 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 	return error;
 }
 
-asmlinkage long
-sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
+		sigset_t __user *, oset, size_t, sigsetsize)
 {
 	int error = -EINVAL;
 	sigset_t old_set, new_set;
@@ -2074,8 +2076,7 @@ out:
 	return error;
 }
 
-asmlinkage long
-sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
+SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
 {
 	return do_sigpending(set, sigsetsize);
 }
@@ -2146,11 +2147,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
 
 #endif
 
-asmlinkage long
-sys_rt_sigtimedwait(const sigset_t __user *uthese,
-		    siginfo_t __user *uinfo,
-		    const struct timespec __user *uts,
-		    size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
+		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
+		size_t, sigsetsize)
 {
 	int ret, sig;
 	sigset_t these;
@@ -2223,8 +2222,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
 	return ret;
 }
 
-asmlinkage long
-sys_kill(pid_t pid, int sig)
+SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
 {
 	struct siginfo info;
 
@@ -2283,7 +2281,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
  * exists but it's not belonging to the target process anymore. This
  * method solves the problem of threads exiting and PIDs getting reused.
  */
-asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
+SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
 {
 	/* This is only valid for single tasks */
 	if (pid <= 0 || tgid <= 0)
@@ -2295,8 +2293,7 @@ asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
 /*
  * Send a signal to only one task, even if it's a CLONE_THREAD task.
  */
-asmlinkage long
-sys_tkill(pid_t pid, int sig)
+SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
 {
 	/* This is only valid for single tasks */
 	if (pid <= 0)
@@ -2305,8 +2302,8 @@ sys_tkill(pid_t pid, int sig)
 	return do_tkill(0, pid, sig);
 }
 
-asmlinkage long
-sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
+SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
+		siginfo_t __user *, uinfo)
 {
 	siginfo_t info;
 
@@ -2434,8 +2431,7 @@ out:
 
 #ifdef __ARCH_WANT_SYS_SIGPENDING
 
-asmlinkage long
-sys_sigpending(old_sigset_t __user *set)
+SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
 {
 	return do_sigpending(set, sizeof(*set));
 }
@@ -2446,8 +2442,8 @@ sys_sigpending(old_sigset_t __user *set)
 /* Some platforms have their own version with special arguments others
    support only sys_rt_sigprocmask. */
 
-asmlinkage long
-sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
+SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
+		old_sigset_t __user *, oset)
 {
 	int error;
 	old_sigset_t old_set, new_set;
@@ -2497,11 +2493,10 @@ out:
 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
 
 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
-asmlinkage long
-sys_rt_sigaction(int sig,
-		 const struct sigaction __user *act,
-		 struct sigaction __user *oact,
-		 size_t sigsetsize)
+SYSCALL_DEFINE4(rt_sigaction, int, sig,
+		const struct sigaction __user *, act,
+		struct sigaction __user *, oact,
+		size_t, sigsetsize)
 {
 	struct k_sigaction new_sa, old_sa;
 	int ret = -EINVAL;
@@ -2531,15 +2526,13 @@ out:
 /*
  * For backwards compatibility. Functionality superseded by sigprocmask.
  */
-asmlinkage long
-sys_sgetmask(void)
+SYSCALL_DEFINE0(sgetmask)
 {
 	/* SMP safe */
 	return current->blocked.sig[0];
 }
 
-asmlinkage long
-sys_ssetmask(int newmask)
+SYSCALL_DEFINE1(ssetmask, int, newmask)
 {
 	int old;
 
@@ -2559,8 +2552,7 @@ sys_ssetmask(int newmask)
 /*
  * For backwards compatibility. Functionality superseded by sigaction.
  */
-asmlinkage unsigned long
-sys_signal(int sig, __sighandler_t handler)
+SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
 {
 	struct k_sigaction new_sa, old_sa;
 	int ret;
@@ -2577,8 +2569,7 @@ sys_signal(int sig, __sighandler_t handler)
 
 #ifdef __ARCH_WANT_SYS_PAUSE
 
-asmlinkage long
-sys_pause(void)
+SYSCALL_DEFINE0(pause)
 {
 	current->state = TASK_INTERRUPTIBLE;
 	schedule();
@@ -2588,7 +2579,7 @@ sys_pause(void)
 #endif
 
 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
-asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
+SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
 {
 	sigset_t newset;
 
diff --git a/kernel/smp.c b/kernel/smp.c
index 5cfa0e5e3e88..bbedbb7efe32 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -18,6 +18,7 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);
 enum {
 	CSD_FLAG_WAIT		= 0x01,
 	CSD_FLAG_ALLOC		= 0x02,
+	CSD_FLAG_LOCK		= 0x04,
 };
 
 struct call_function_data {
@@ -186,6 +187,9 @@ void generic_smp_call_function_single_interrupt(void)
 		if (data_flags & CSD_FLAG_WAIT) {
 			smp_wmb();
 			data->flags &= ~CSD_FLAG_WAIT;
+		} else if (data_flags & CSD_FLAG_LOCK) {
+			smp_wmb();
+			data->flags &= ~CSD_FLAG_LOCK;
 		} else if (data_flags & CSD_FLAG_ALLOC)
 			kfree(data);
 	}
@@ -196,6 +200,8 @@ void generic_smp_call_function_single_interrupt(void)
 	}
 }
 
+static DEFINE_PER_CPU(struct call_single_data, csd_data);
+
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
@@ -224,14 +230,38 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 		func(info);
 		local_irq_restore(flags);
 	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		struct call_single_data *data = NULL;
+		struct call_single_data *data;
 
 		if (!wait) {
+			/*
+			 * We are calling a function on a single CPU
+			 * and we are not going to wait for it to finish.
+			 * We first try to allocate the data, but if we
+			 * fail, we fall back to use a per cpu data to pass
+			 * the information to that CPU. Since all callers
+			 * of this code will use the same data, we must
+			 * synchronize the callers to prevent a new caller
+			 * from corrupting the data before the callee
+			 * can access it.
+			 *
+			 * The CSD_FLAG_LOCK is used to let us know when
+			 * the IPI handler is done with the data.
+			 * The first caller will set it, and the callee
+			 * will clear it. The next caller must wait for
			 * it to clear before we set it again. This
+			 * will make sure the callee is done with the
+			 * data before a new caller will use it.
+			 */
 			data = kmalloc(sizeof(*data), GFP_ATOMIC);
 			if (data)
 				data->flags = CSD_FLAG_ALLOC;
-		}
-		if (!data) {
+			else {
+				data = &per_cpu(csd_data, me);
+				while (data->flags & CSD_FLAG_LOCK)
+					cpu_relax();
+				data->flags = CSD_FLAG_LOCK;
+			}
+		} else {
 			data = &d;
 			data->flags = CSD_FLAG_WAIT;
 		}
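Reading the smp.c hunks together, the kmalloc-failure fallback serializes all no-wait single-CPU calls from one sending CPU through a single per-CPU slot:

	/*
	 * Handshake between sender and IPI handler (names from the hunks
	 * above; "csd" is the sending CPU's csd_data slot):
	 *
	 *   sender (kmalloc failed, !wait)     target CPU's IPI handler
	 *   ------------------------------     ------------------------------
	 *   while (csd->flags & CSD_FLAG_LOCK)
	 *           cpu_relax();               func(csd->info);
	 *   csd->flags = CSD_FLAG_LOCK;        smp_wmb();  (order prior stores)
	 *   fill csd, queue it, send IPI       csd->flags &= ~CSD_FLAG_LOCK;
	 */

Clearing the flag after smp_wmb() is the release: once the sender sees it clear, the handler is done with the slot and it may be reused. At most one such call per sending CPU can be outstanding at a time.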
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9188c66278a..85d5a2455103 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -16,6 +16,7 @@
 #include <linux/lockdep.h>
 #include <linux/notifier.h>
 #include <linux/module.h>
+#include <linux/sysctl.h>
 
 #include <asm/irq_regs.h>
 
@@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void)
 }
 EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
 
+int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+			     struct file *filp, void __user *buffer,
+			     size_t *lenp, loff_t *ppos)
+{
+	touch_all_softlockup_watchdogs();
+	return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+}
+
 /*
  * This callback runs from the timer interrupt, and checks
  * whether the watchdog thread has hung or not:
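proc_dosoftlockup_thresh() pairs with the sysctl.c hunk further down that points softlockup_thresh at it: touching every CPU's watchdog timestamp before the write takes effect means a lowered threshold is measured from now on, rather than being applied retroactively to stale touch times, which could otherwise report a spurious lockup the moment the value shrinks.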
diff --git a/kernel/sys.c b/kernel/sys.c
index 763c3c17ded3..f145c415bc16 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -143,7 +143,7 @@ out:
 	return error;
 }
 
-asmlinkage long sys_setpriority(int which, int who, int niceval)
+SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
 {
 	struct task_struct *g, *p;
 	struct user_struct *user;
@@ -208,7 +208,7 @@ out:
  * has been offset by 20 (ie it returns 40..1 instead of -20..19)
  * to stay compatible.
  */
-asmlinkage long sys_getpriority(int which, int who)
+SYSCALL_DEFINE2(getpriority, int, which, int, who)
 {
 	struct task_struct *g, *p;
 	struct user_struct *user;
@@ -355,7 +355,8 @@ EXPORT_SYMBOL_GPL(kernel_power_off);
  *
  * reboot doesn't sync: do that yourself before calling this.
  */
-asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
+SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
+		void __user *, arg)
 {
 	char buffer[256];
 
@@ -478,7 +479,7 @@ void ctrl_alt_del(void)
  * SMP: There are not races, the GIDs are checked only by filesystem
  *      operations (as far as semantic preservation is concerned).
  */
-asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
+SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -529,7 +530,7 @@ error:
  *
 * SMP: Same implicit races as above.
 */
-asmlinkage long sys_setgid(gid_t gid)
+SYSCALL_DEFINE1(setgid, gid_t, gid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -597,7 +598,7 @@ static int set_user(struct cred *new)
  * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
-asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
+SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -661,7 +662,7 @@ error:
  * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
-asmlinkage long sys_setuid(uid_t uid)
+SYSCALL_DEFINE1(setuid, uid_t, uid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -705,7 +706,7 @@ error:
  * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
-asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
+SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -756,7 +757,7 @@ error:
 	return retval;
 }
 
-asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
+SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
 {
 	const struct cred *cred = current_cred();
 	int retval;
@@ -771,7 +772,7 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us
 /*
  * Same as above, but for rgid, egid, sgid.
  */
-asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -814,7 +815,7 @@ error:
 	return retval;
 }
 
-asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
+SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
 {
 	const struct cred *cred = current_cred();
 	int retval;
@@ -833,7 +834,7 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us
  * whatever uid it wants to). It normally shadows "euid", except when
  * explicitly set by setfsuid() or for access..
  */
-asmlinkage long sys_setfsuid(uid_t uid)
+SYSCALL_DEFINE1(setfsuid, uid_t, uid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -870,7 +871,7 @@ change_okay:
 /*
  * Samma på svenska..
 */
-asmlinkage long sys_setfsgid(gid_t gid)
+SYSCALL_DEFINE1(setfsgid, gid_t, gid)
 {
 	const struct cred *old;
 	struct cred *new;
@@ -919,7 +920,7 @@ void do_sys_times(struct tms *tms)
 	tms->tms_cstime = cputime_to_clock_t(cstime);
 }
 
-asmlinkage long sys_times(struct tms __user * tbuf)
+SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
 {
 	if (tbuf) {
 		struct tms tmp;
@@ -944,7 +945,7 @@ asmlinkage long sys_times(struct tms __user * tbuf)
  * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
-asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
+SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 {
 	struct task_struct *p;
 	struct task_struct *group_leader = current->group_leader;
@@ -1015,7 +1016,7 @@ out:
 	return err;
 }
 
-asmlinkage long sys_getpgid(pid_t pid)
+SYSCALL_DEFINE1(getpgid, pid_t, pid)
 {
 	struct task_struct *p;
 	struct pid *grp;
@@ -1045,14 +1046,14 @@ out:
 
 #ifdef __ARCH_WANT_SYS_GETPGRP
 
-asmlinkage long sys_getpgrp(void)
+SYSCALL_DEFINE0(getpgrp)
 {
 	return sys_getpgid(0);
 }
 
 #endif
 
-asmlinkage long sys_getsid(pid_t pid)
+SYSCALL_DEFINE1(getsid, pid_t, pid)
 {
 	struct task_struct *p;
 	struct pid *sid;
@@ -1080,7 +1081,7 @@ out:
 	return retval;
 }
 
-asmlinkage long sys_setsid(void)
+SYSCALL_DEFINE0(setsid)
 {
 	struct task_struct *group_leader = current->group_leader;
 	struct pid *sid = task_pid(group_leader);
@@ -1311,7 +1312,7 @@ int set_current_groups(struct group_info *group_info)
 
 EXPORT_SYMBOL(set_current_groups);
 
-asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
+SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
 {
 	const struct cred *cred = current_cred();
 	int i;
@@ -1340,7 +1341,7 @@ out:
  *	without another task interfering.
 */
 
-asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
+SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
 {
 	struct group_info *group_info;
 	int retval;
@@ -1394,7 +1395,7 @@ EXPORT_SYMBOL(in_egroup_p);
 
 DECLARE_RWSEM(uts_sem);
 
-asmlinkage long sys_newuname(struct new_utsname __user * name)
+SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
 {
 	int errno = 0;
 
@@ -1405,7 +1406,7 @@ asmlinkage long sys_newuname(struct new_utsname __user * name)
 	return errno;
 }
 
-asmlinkage long sys_sethostname(char __user *name, int len)
+SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
 {
 	int errno;
 	char tmp[__NEW_UTS_LEN];
@@ -1429,7 +1430,7 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 
 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
 
-asmlinkage long sys_gethostname(char __user *name, int len)
+SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
 {
 	int i, errno;
 	struct new_utsname *u;
@@ -1454,7 +1455,7 @@ asmlinkage long sys_gethostname(char __user *name, int len)
  * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
-asmlinkage long sys_setdomainname(char __user *name, int len)
+SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
 {
 	int errno;
 	char tmp[__NEW_UTS_LEN];
@@ -1477,7 +1478,7 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
 	return errno;
 }
 
-asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
 {
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
@@ -1496,7 +1497,8 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
  *	Back compatibility for getrlimit. Needed for some apps.
 */
 
-asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
+		struct rlimit __user *, rlim)
 {
 	struct rlimit x;
 	if (resource >= RLIM_NLIMITS)
@@ -1514,7 +1516,7 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
 
 #endif
 
-asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
+SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
 {
 	struct rlimit new_rlim, *old_rlim;
 	int retval;
@@ -1523,22 +1525,14 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 		return -EINVAL;
 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
 		return -EFAULT;
+	if (new_rlim.rlim_cur > new_rlim.rlim_max)
+		return -EINVAL;
 	old_rlim = current->signal->rlim + resource;
 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
 	    !capable(CAP_SYS_RESOURCE))
 		return -EPERM;
-
-	if (resource == RLIMIT_NOFILE) {
-		if (new_rlim.rlim_max == RLIM_INFINITY)
-			new_rlim.rlim_max = sysctl_nr_open;
-		if (new_rlim.rlim_cur == RLIM_INFINITY)
-			new_rlim.rlim_cur = sysctl_nr_open;
-		if (new_rlim.rlim_max > sysctl_nr_open)
-			return -EPERM;
-	}
-
-	if (new_rlim.rlim_cur > new_rlim.rlim_max)
-		return -EINVAL;
 
 	retval = security_task_setrlimit(resource, &new_rlim);
 	if (retval)
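Behavioural note on the setrlimit() reordering: the old code silently rewrote RLIM_INFINITY to sysctl_nr_open for RLIMIT_NOFILE, so an "unlimited" request quietly succeeded with a capped value; the new code refuses any rlim_max above sysctl_nr_open outright, and the cur <= max sanity check now runs before the capability test, so malformed input fails with EINVAL even for privileged callers. An illustrative userspace consequence (values hypothetical; nr_open defaults to 1024*1024):

	#include <stdio.h>
	#include <sys/resource.h>

	int main(void)
	{
		struct rlimit rl = { .rlim_cur = 1024, .rlim_max = RLIM_INFINITY };

		/* before: rlim_max silently clamped to /proc/sys/fs/nr_open
		 * after:  fails with EPERM unless nr_open is raised first */
		if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
			perror("setrlimit(RLIMIT_NOFILE)");
		return 0;
	}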
@@ -1687,7 +1681,7 @@ int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
 	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
 }
 
-asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
+SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
 {
 	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
 	    who != RUSAGE_THREAD)
@@ -1695,14 +1689,14 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
 	return getrusage(current, who, ru);
 }
 
-asmlinkage long sys_umask(int mask)
+SYSCALL_DEFINE1(umask, int, mask)
 {
 	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
 	return mask;
 }
 
-asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
-			  unsigned long arg4, unsigned long arg5)
+SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
+		unsigned long, arg4, unsigned long, arg5)
 {
 	struct task_struct *me = current;
 	unsigned char comm[sizeof(me->comm)];
@@ -1815,8 +1809,8 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 	return error;
 }
 
-asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-			   struct getcpu_cache __user *unused)
+SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
+		struct getcpu_cache __user *, unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index e14a23281707..27dad2967387 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -131,6 +131,7 @@ cond_syscall(sys_io_destroy);
 cond_syscall(sys_io_submit);
 cond_syscall(sys_io_cancel);
 cond_syscall(sys_io_getevents);
+cond_syscall(sys_syslog);
 
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 89d74436318c..790f9d785663 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -144,6 +144,7 @@ extern int acct_parm[];
 
 #ifdef CONFIG_IA64
 extern int no_unaligned_warning;
+extern int unaligned_dump_stack;
 #endif
 
 #ifdef CONFIG_RT_MUTEXES
@@ -781,6 +782,14 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "unaligned-dump-stack",
+		.data		= &unaligned_dump_stack,
+		.maxlen		= sizeof (int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
 #endif
 #ifdef CONFIG_DETECT_SOFTLOCKUP
 	{
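The new CTL_UNNUMBERED entry surfaces as /proc/sys/kernel/unaligned-dump-stack; judging by the companion ia64 extern above, writing a nonzero value makes the ia64 unaligned-access fault handler dump a stack trace along with its usual warning.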
@@ -800,7 +809,7 @@ static struct ctl_table kern_table[] = {
 		.data		= &softlockup_thresh,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= &proc_dointvec_minmax,
+		.proc_handler	= &proc_dosoftlockup_thresh,
 		.strategy	= &sysctl_intvec,
 		.extra1		= &neg_one,
 		.extra2		= &sixty,
@@ -1688,7 +1697,7 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol
 	return error;
 }
 
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
+SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
 {
 	struct __sysctl_args tmp;
 	int error;
@@ -2989,7 +2998,7 @@ int sysctl_ms_jiffies(struct ctl_table *table,
 #else /* CONFIG_SYSCTL_SYSCALL */
 
 
-asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
+SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
 {
 	struct __sysctl_args tmp;
 	int error;
diff --git a/kernel/time.c b/kernel/time.c
index 4886e3ce83a4..29511943871a 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(sys_tz);
  * why not move it into the appropriate arch directory (for those
  * architectures that need it).
  */
-asmlinkage long sys_time(time_t __user * tloc)
+SYSCALL_DEFINE1(time, time_t __user *, tloc)
 {
 	time_t i = get_seconds();
 
@@ -79,7 +79,7 @@ asmlinkage long sys_time(time_t __user * tloc)
  * architectures that need it).
  */
 
-asmlinkage long sys_stime(time_t __user *tptr)
+SYSCALL_DEFINE1(stime, time_t __user *, tptr)
 {
 	struct timespec tv;
 	int err;
@@ -99,8 +99,8 @@ asmlinkage long sys_stime(time_t __user *tptr)
 
 #endif /* __ARCH_WANT_SYS_TIME */
 
-asmlinkage long sys_gettimeofday(struct timeval __user *tv,
-				 struct timezone __user *tz)
+SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
+		struct timezone __user *, tz)
 {
 	if (likely(tv != NULL)) {
 		struct timeval ktv;
@@ -184,8 +184,8 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
 	return 0;
 }
 
-asmlinkage long sys_settimeofday(struct timeval __user *tv,
-				 struct timezone __user *tz)
+SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
+		struct timezone __user *, tz)
 {
 	struct timeval user_tv;
 	struct timespec new_ts;
@@ -205,7 +205,7 @@ asmlinkage long sys_settimeofday(struct timeval __user *tv,
 	return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
 }
 
-asmlinkage long sys_adjtimex(struct timex __user *txc_p)
+SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
 {
 	struct timex txc;		/* Local copy of parameter */
 	int ret;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 63e05d423a09..21a5ca849514 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -274,6 +274,21 @@ out_bc:
 }
 
 /*
+ * Transfer the do_timer job away from a dying cpu.
+ *
+ * Called with interrupts disabled.
+ */
+static void tick_handover_do_timer(int *cpup)
+{
+	if (*cpup == tick_do_timer_cpu) {
+		int cpu = cpumask_first(cpu_online_mask);
+
+		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
+			TICK_DO_TIMER_NONE;
+	}
+}
+
+/*
  * Shutdown an event device on a given cpu:
  *
  * This is called on a life CPU, when a CPU is dead. So we cannot
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup)
 		clockevents_exchange_device(dev, NULL);
 		td->evtdev = NULL;
 	}
-	/* Transfer the do_timer job away from this cpu */
-	if (*cpup == tick_do_timer_cpu) {
-		int cpu = cpumask_first(cpu_online_mask);
-
-		tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
-			TICK_DO_TIMER_NONE;
-	}
 	spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
 		tick_broadcast_oneshot_control(reason);
 		break;
 
+	case CLOCK_EVT_NOTIFY_CPU_DYING:
+		tick_handover_do_timer(dev);
+		break;
+
 	case CLOCK_EVT_NOTIFY_CPU_DEAD:
 		tick_shutdown_broadcast_oneshot(dev);
 		tick_shutdown_broadcast(dev);
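Moving the do_timer handover from the CPU_DEAD path to the new CLOCK_EVT_NOTIFY_CPU_DYING notification changes both when and where it runs: CPU_DYING is delivered on the dying CPU itself with interrupts disabled, so the jiffies-update duty is reassigned before that CPU stops ticking, instead of afterwards from a surviving CPU, presumably closing a window in which no CPU would have been updating jiffies.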
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1b6c05bd0d0a..d3f1ef4d5cbe 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz);
  * value. We do this unconditionally on any cpu, as we don't know whether the
  * cpu, which has the update task assigned is in a long sleep.
  */
-void tick_nohz_update_jiffies(void)
+static void tick_nohz_update_jiffies(void)
 {
 	int cpu = smp_processor_id();
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
diff --git a/kernel/timer.c b/kernel/timer.c
index dee3f641a7a7..13dd64fe143d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1129,7 +1129,7 @@ void do_timer(unsigned long ticks)
1129 * For backwards compatibility? This can be done in libc so Alpha 1129 * For backwards compatibility? This can be done in libc so Alpha
1130 * and all newer ports shouldn't need it. 1130 * and all newer ports shouldn't need it.
1131 */ 1131 */
1132asmlinkage unsigned long sys_alarm(unsigned int seconds) 1132SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1133{ 1133{
1134 return alarm_setitimer(seconds); 1134 return alarm_setitimer(seconds);
1135} 1135}
@@ -1152,7 +1152,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds)
1152 * 1152 *
1153 * This is SMP safe as current->tgid does not change. 1153 * This is SMP safe as current->tgid does not change.
1154 */ 1154 */
1155asmlinkage long sys_getpid(void) 1155SYSCALL_DEFINE0(getpid)
1156{ 1156{
1157 return task_tgid_vnr(current); 1157 return task_tgid_vnr(current);
1158} 1158}
@@ -1163,7 +1163,7 @@ asmlinkage long sys_getpid(void)
1163 * value of ->real_parent under rcu_read_lock(), see 1163 * value of ->real_parent under rcu_read_lock(), see
1164 * release_task()->call_rcu(delayed_put_task_struct). 1164 * release_task()->call_rcu(delayed_put_task_struct).
1165 */ 1165 */
1166asmlinkage long sys_getppid(void) 1166SYSCALL_DEFINE0(getppid)
1167{ 1167{
1168 int pid; 1168 int pid;
1169 1169
@@ -1174,25 +1174,25 @@ asmlinkage long sys_getppid(void)
1174 return pid; 1174 return pid;
1175} 1175}
1176 1176
1177asmlinkage long sys_getuid(void) 1177SYSCALL_DEFINE0(getuid)
1178{ 1178{
1179 /* Only we change this so SMP safe */ 1179 /* Only we change this so SMP safe */
1180 return current_uid(); 1180 return current_uid();
1181} 1181}
1182 1182
1183asmlinkage long sys_geteuid(void) 1183SYSCALL_DEFINE0(geteuid)
1184{ 1184{
1185 /* Only we change this so SMP safe */ 1185 /* Only we change this so SMP safe */
1186 return current_euid(); 1186 return current_euid();
1187} 1187}
1188 1188
1189asmlinkage long sys_getgid(void) 1189SYSCALL_DEFINE0(getgid)
1190{ 1190{
1191 /* Only we change this so SMP safe */ 1191 /* Only we change this so SMP safe */
1192 return current_gid(); 1192 return current_gid();
1193} 1193}
1194 1194
1195asmlinkage long sys_getegid(void) 1195SYSCALL_DEFINE0(getegid)
1196{ 1196{
1197 /* Only we change this so SMP safe */ 1197 /* Only we change this so SMP safe */
1198 return current_egid(); 1198 return current_egid();
@@ -1308,7 +1308,7 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1308EXPORT_SYMBOL(schedule_timeout_uninterruptible); 1308EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1309 1309
1310/* Thread ID - the internal kernel "pid" */ 1310/* Thread ID - the internal kernel "pid" */
1311asmlinkage long sys_gettid(void) 1311SYSCALL_DEFINE0(gettid)
1312{ 1312{
1313 return task_pid_vnr(current); 1313 return task_pid_vnr(current);
1314} 1314}
@@ -1400,7 +1400,7 @@ out:
1400 return 0; 1400 return 0;
1401} 1401}
1402 1402
1403asmlinkage long sys_sysinfo(struct sysinfo __user *info) 1403SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
1404{ 1404{
1405 struct sysinfo val; 1405 struct sysinfo val;
1406 1406
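
The sys_* to SYSCALL_DEFINEn conversions in this file are mechanical: the macro takes the argument count, the syscall name, and alternating type/name pairs. As a rough sketch of what the macro family produces (simplified from the include/linux/syscalls.h of this era; the exact expansion depends on CONFIG_SYSCALL_WRAPPERS, and SYSC_alarm/SyS_alarm are internal names assumed here for illustration):

    /* Without CONFIG_SYSCALL_WRAPPERS, a thin veneer: */
    asmlinkage long sys_alarm(unsigned int seconds)
    {
            return alarm_setitimer(seconds);
    }

    /* With CONFIG_SYSCALL_WRAPPERS (64-bit architectures), the real body
     * is emitted under an internal name and an asmlinkage wrapper takes
     * every argument as a long and casts it down, so stale upper halves
     * of registers from 32-bit userspace cannot reach the handler: */
    static inline long SYSC_alarm(unsigned int seconds)
    {
            return alarm_setitimer(seconds);
    }
    asmlinkage long SyS_alarm(long seconds)
    {
            return SYSC_alarm((unsigned int)seconds);
    }

The uniform macro also gives later instrumentation a single place to hook every syscall entry point.
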
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2f32969c09df..9a236ffe2aa4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -17,6 +17,7 @@
17#include <linux/clocksource.h> 17#include <linux/clocksource.h>
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/suspend.h>
20#include <linux/debugfs.h> 21#include <linux/debugfs.h>
21#include <linux/hardirq.h> 22#include <linux/hardirq.h>
22#include <linux/kthread.h> 23#include <linux/kthread.h>
@@ -1736,9 +1737,12 @@ static void clear_ftrace_pid(struct pid *pid)
1736{ 1737{
1737 struct task_struct *p; 1738 struct task_struct *p;
1738 1739
1740 rcu_read_lock();
1739 do_each_pid_task(pid, PIDTYPE_PID, p) { 1741 do_each_pid_task(pid, PIDTYPE_PID, p) {
1740 clear_tsk_trace_trace(p); 1742 clear_tsk_trace_trace(p);
1741 } while_each_pid_task(pid, PIDTYPE_PID, p); 1743 } while_each_pid_task(pid, PIDTYPE_PID, p);
1744 rcu_read_unlock();
1745
1742 put_pid(pid); 1746 put_pid(pid);
1743} 1747}
1744 1748
@@ -1746,9 +1750,11 @@ static void set_ftrace_pid(struct pid *pid)
1746{ 1750{
1747 struct task_struct *p; 1751 struct task_struct *p;
1748 1752
1753 rcu_read_lock();
1749 do_each_pid_task(pid, PIDTYPE_PID, p) { 1754 do_each_pid_task(pid, PIDTYPE_PID, p) {
1750 set_tsk_trace_trace(p); 1755 set_tsk_trace_trace(p);
1751 } while_each_pid_task(pid, PIDTYPE_PID, p); 1756 } while_each_pid_task(pid, PIDTYPE_PID, p);
1757 rcu_read_unlock();
1752} 1758}
1753 1759
1754static void clear_ftrace_pid_task(struct pid **pid) 1760static void clear_ftrace_pid_task(struct pid **pid)
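
The rcu_read_lock()/rcu_read_unlock() pairs added to clear_ftrace_pid() and set_ftrace_pid() follow the standard rule that do_each_pid_task() walks an RCU-protected task list. A minimal sketch of the locking shape (for_each_task_of_pid() is a hypothetical helper, not kernel API):

    #include <linux/pid.h>
    #include <linux/rcupdate.h>
    #include <linux/sched.h>

    /* The tasks attached to a struct pid can be unhashed concurrently,
     * so the traversal must sit inside an RCU read-side critical
     * section; the callback must not sleep. */
    static void for_each_task_of_pid(struct pid *pid,
                                     void (*fn)(struct task_struct *))
    {
            struct task_struct *p;

            rcu_read_lock();
            do_each_pid_task(pid, PIDTYPE_PID, p) {
                    fn(p);
            } while_each_pid_task(pid, PIDTYPE_PID, p);
            rcu_read_unlock();
    }
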
@@ -1965,6 +1971,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
1965#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1971#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1966 1972
1967static atomic_t ftrace_graph_active; 1973static atomic_t ftrace_graph_active;
1974static struct notifier_block ftrace_suspend_notifier;
1968 1975
1969int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 1976int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1970{ 1977{
@@ -2043,6 +2050,27 @@ static int start_graph_tracing(void)
2043 return ret; 2050 return ret;
2044} 2051}
2045 2052
2053/*
2054 * Hibernation protection.
2055 * The state of the current task is too unstable during
2056 * suspend/restore to disk. We want to protect against that.
2057 */
2058static int
2059ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2060 void *unused)
2061{
2062 switch (state) {
2063 case PM_HIBERNATION_PREPARE:
2064 pause_graph_tracing();
2065 break;
2066
2067 case PM_POST_HIBERNATION:
2068 unpause_graph_tracing();
2069 break;
2070 }
2071 return NOTIFY_DONE;
2072}
2073
2046int register_ftrace_graph(trace_func_graph_ret_t retfunc, 2074int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2047 trace_func_graph_ent_t entryfunc) 2075 trace_func_graph_ent_t entryfunc)
2048{ 2076{
@@ -2050,6 +2078,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2050 2078
2051 mutex_lock(&ftrace_sysctl_lock); 2079 mutex_lock(&ftrace_sysctl_lock);
2052 2080
2081 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2082 register_pm_notifier(&ftrace_suspend_notifier);
2083
2053 atomic_inc(&ftrace_graph_active); 2084 atomic_inc(&ftrace_graph_active);
2054 ret = start_graph_tracing(); 2085 ret = start_graph_tracing();
2055 if (ret) { 2086 if (ret) {
@@ -2075,6 +2106,7 @@ void unregister_ftrace_graph(void)
2075 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 2106 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2076 ftrace_graph_entry = ftrace_graph_entry_stub; 2107 ftrace_graph_entry = ftrace_graph_entry_stub;
2077 ftrace_shutdown(FTRACE_STOP_FUNC_RET); 2108 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2109 unregister_pm_notifier(&ftrace_suspend_notifier);
2078 2110
2079 mutex_unlock(&ftrace_sysctl_lock); 2111 mutex_unlock(&ftrace_sysctl_lock);
2080} 2112}
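
The suspend notifier wired up above is a plain PM notifier: pausing the graph tracer around hibernation keeps it from sampling task state that the image write and resume paths churn through. A reduced sketch of the registration pattern (my_pm_callback/my_pm_nb are illustrative names):

    #include <linux/suspend.h>
    #include <linux/notifier.h>

    static int my_pm_callback(struct notifier_block *nb,
                              unsigned long state, void *unused)
    {
            switch (state) {
            case PM_HIBERNATION_PREPARE:
                    /* quiesce before the hibernation image is written */
                    break;
            case PM_POST_HIBERNATION:
                    /* resume normal operation */
                    break;
            }
            return NOTIFY_DONE;
    }

    static struct notifier_block my_pm_nb = {
            .notifier_call = my_pm_callback,
    };

    /* register_pm_notifier(&my_pm_nb) at setup and
     * unregister_pm_notifier(&my_pm_nb) at teardown, mirroring the
     * register_ftrace_graph()/unregister_ftrace_graph() hunks above. */
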
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8b0daf0662ef..bd38c5cfd8ad 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -246,7 +246,7 @@ static inline int test_time_stamp(u64 delta)
246 return 0; 246 return 0;
247} 247}
248 248
249#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) 249#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
250 250
251/* 251/*
252 * head_page == tail_page && head == tail then buffer is empty. 252 * head_page == tail_page && head == tail then buffer is empty.
@@ -1025,12 +1025,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1025 } 1025 }
1026 1026
1027 if (next_page == head_page) { 1027 if (next_page == head_page) {
1028 if (!(buffer->flags & RB_FL_OVERWRITE)) { 1028 if (!(buffer->flags & RB_FL_OVERWRITE))
1029 /* reset write */
1030 if (tail <= BUF_PAGE_SIZE)
1031 local_set(&tail_page->write, tail);
1032 goto out_unlock; 1029 goto out_unlock;
1033 }
1034 1030
1035 /* tail_page has not moved yet? */ 1031 /* tail_page has not moved yet? */
1036 if (tail_page == cpu_buffer->tail_page) { 1032 if (tail_page == cpu_buffer->tail_page) {
@@ -1105,6 +1101,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1105 return event; 1101 return event;
1106 1102
1107 out_unlock: 1103 out_unlock:
1104 /* reset write */
1105 if (tail <= BUF_PAGE_SIZE)
1106 local_set(&tail_page->write, tail);
1107
1108 __raw_spin_unlock(&cpu_buffer->lock); 1108 __raw_spin_unlock(&cpu_buffer->lock);
1109 local_irq_restore(flags); 1109 local_irq_restore(flags);
1110 return NULL; 1110 return NULL;
@@ -2174,6 +2174,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2174 2174
2175 cpu_buffer->overrun = 0; 2175 cpu_buffer->overrun = 0;
2176 cpu_buffer->entries = 0; 2176 cpu_buffer->entries = 0;
2177
2178 cpu_buffer->write_stamp = 0;
2179 cpu_buffer->read_stamp = 0;
2177} 2180}
2178 2181
2179/** 2182/**
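
The BUF_PAGE_SIZE change is the subtle one in this file: with a trailing flexible array member, sizeof() includes whatever tail padding the compiler adds to honor the struct's alignment, while offsetof(..., data) is the exact byte at which the payload begins. A standalone illustration (the field layout is invented for the example, not the real struct buffer_data_page):

    #include <stddef.h>
    #include <stdio.h>

    struct header_then_data {
            long long timestamp;    /* 8 bytes, 8-byte alignment */
            int       commit;       /* 4 bytes */
            char      data[];       /* payload starts here */
    };

    int main(void)
    {
            /* On a typical LP64 ABI this prints sizeof=16 offsetof=12:
             * sizing the payload with offsetof() reclaims the padding
             * and, more importantly, matches where data[] actually
             * starts, which is what the write-offset math assumes. */
            printf("sizeof=%zu offsetof=%zu\n",
                   sizeof(struct header_then_data),
                   offsetof(struct header_then_data, data));
            return 0;
    }
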
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c580233add95..17bb88d86ac2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -40,7 +40,7 @@
40 40
41#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) 41#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
42 42
43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 43unsigned long __read_mostly tracing_max_latency;
44unsigned long __read_mostly tracing_thresh; 44unsigned long __read_mostly tracing_thresh;
45 45
46/* 46/*
@@ -3736,7 +3736,7 @@ static struct notifier_block trace_die_notifier = {
3736 * it if we decide to change what log level the ftrace dump 3736 * it if we decide to change what log level the ftrace dump
3737 * should be at. 3737 * should be at.
3738 */ 3738 */
3739#define KERN_TRACE KERN_INFO 3739#define KERN_TRACE KERN_EMERG
3740 3740
3741static void 3741static void
3742trace_printk_seq(struct trace_seq *s) 3742trace_printk_seq(struct trace_seq *s)
@@ -3770,6 +3770,7 @@ void ftrace_dump(void)
3770 dump_ran = 1; 3770 dump_ran = 1;
3771 3771
3772 /* No turning back! */ 3772 /* No turning back! */
3773 tracing_off();
3773 ftrace_kill(); 3774 ftrace_kill();
3774 3775
3775 for_each_tracing_cpu(cpu) { 3776 for_each_tracing_cpu(cpu) {
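
Redefining KERN_TRACE from KERN_INFO to KERN_EMERG matters because console output is gated on loglevel: a crash-time ftrace dump must not be filtered out on a console booted with "quiet". The gate in the printk path has roughly this shape (simplified, not the literal code):

    /* Lower numbers are more urgent; only messages whose level is
     * numerically below console_loglevel reach the console drivers.
     * KERN_EMERG is level 0 and always passes; KERN_INFO is level 6
     * and is dropped once the loglevel is lowered. */
    if (msg_loglevel < console_loglevel)
            call_console_drivers(msg);
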
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 7c2e326bbc8b..62a78d943534 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -380,6 +380,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
380 380
381static void __irqsoff_tracer_init(struct trace_array *tr) 381static void __irqsoff_tracer_init(struct trace_array *tr)
382{ 382{
383 tracing_max_latency = 0;
383 irqsoff_trace = tr; 384 irqsoff_trace = tr;
384 /* make sure that the tracer is visible */ 385 /* make sure that the tracer is visible */
385 smp_wmb(); 386 smp_wmb();
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 43586b689e31..42ae1e77b6b3 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -333,6 +333,7 @@ static void stop_wakeup_tracer(struct trace_array *tr)
333 333
334static int wakeup_tracer_init(struct trace_array *tr) 334static int wakeup_tracer_init(struct trace_array *tr)
335{ 335{
336 tracing_max_latency = 0;
336 wakeup_trace = tr; 337 wakeup_trace = tr;
337 start_wakeup_tracer(tr); 338 start_wakeup_tracer(tr);
338 return 0; 339 return 0;
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 2460c3199b5a..0314501688b9 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -17,7 +17,7 @@
17 17
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19 19
20asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group) 20SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
21{ 21{
22 long ret = sys_chown(filename, low2highuid(user), low2highgid(group)); 22 long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
23 /* avoid REGPARM breakage on x86: */ 23 /* avoid REGPARM breakage on x86: */
@@ -25,7 +25,7 @@ asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gi
25 return ret; 25 return ret;
26} 26}
27 27
28asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group) 28SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
29{ 29{
30 long ret = sys_lchown(filename, low2highuid(user), low2highgid(group)); 30 long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
31 /* avoid REGPARM breakage on x86: */ 31 /* avoid REGPARM breakage on x86: */
@@ -33,7 +33,7 @@ asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_g
33 return ret; 33 return ret;
34} 34}
35 35
36asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group) 36SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)
37{ 37{
38 long ret = sys_fchown(fd, low2highuid(user), low2highgid(group)); 38 long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
39 /* avoid REGPARM breakage on x86: */ 39 /* avoid REGPARM breakage on x86: */
@@ -41,7 +41,7 @@ asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
41 return ret; 41 return ret;
42} 42}
43 43
44asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid) 44SYSCALL_DEFINE2(setregid16, old_gid_t, rgid, old_gid_t, egid)
45{ 45{
46 long ret = sys_setregid(low2highgid(rgid), low2highgid(egid)); 46 long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
47 /* avoid REGPARM breakage on x86: */ 47 /* avoid REGPARM breakage on x86: */
@@ -49,7 +49,7 @@ asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
49 return ret; 49 return ret;
50} 50}
51 51
52asmlinkage long sys_setgid16(old_gid_t gid) 52SYSCALL_DEFINE1(setgid16, old_gid_t, gid)
53{ 53{
54 long ret = sys_setgid(low2highgid(gid)); 54 long ret = sys_setgid(low2highgid(gid));
55 /* avoid REGPARM breakage on x86: */ 55 /* avoid REGPARM breakage on x86: */
@@ -57,7 +57,7 @@ asmlinkage long sys_setgid16(old_gid_t gid)
57 return ret; 57 return ret;
58} 58}
59 59
60asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid) 60SYSCALL_DEFINE2(setreuid16, old_uid_t, ruid, old_uid_t, euid)
61{ 61{
62 long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid)); 62 long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
63 /* avoid REGPARM breakage on x86: */ 63 /* avoid REGPARM breakage on x86: */
@@ -65,7 +65,7 @@ asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
65 return ret; 65 return ret;
66} 66}
67 67
68asmlinkage long sys_setuid16(old_uid_t uid) 68SYSCALL_DEFINE1(setuid16, old_uid_t, uid)
69{ 69{
70 long ret = sys_setuid(low2highuid(uid)); 70 long ret = sys_setuid(low2highuid(uid));
71 /* avoid REGPARM breakage on x86: */ 71 /* avoid REGPARM breakage on x86: */
@@ -73,7 +73,7 @@ asmlinkage long sys_setuid16(old_uid_t uid)
73 return ret; 73 return ret;
74} 74}
75 75
76asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) 76SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid)
77{ 77{
78 long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid), 78 long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
79 low2highuid(suid)); 79 low2highuid(suid));
@@ -82,7 +82,7 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
82 return ret; 82 return ret;
83} 83}
84 84
85asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) 85SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid)
86{ 86{
87 const struct cred *cred = current_cred(); 87 const struct cred *cred = current_cred();
88 int retval; 88 int retval;
@@ -94,7 +94,7 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid,
94 return retval; 94 return retval;
95} 95}
96 96
97asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) 97SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid)
98{ 98{
99 long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid), 99 long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
100 low2highgid(sgid)); 100 low2highgid(sgid));
@@ -103,7 +103,8 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
103 return ret; 103 return ret;
104} 104}
105 105
106asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) 106
107SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid)
107{ 108{
108 const struct cred *cred = current_cred(); 109 const struct cred *cred = current_cred();
109 int retval; 110 int retval;
@@ -115,7 +116,7 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid,
115 return retval; 116 return retval;
116} 117}
117 118
118asmlinkage long sys_setfsuid16(old_uid_t uid) 119SYSCALL_DEFINE1(setfsuid16, old_uid_t, uid)
119{ 120{
120 long ret = sys_setfsuid(low2highuid(uid)); 121 long ret = sys_setfsuid(low2highuid(uid));
121 /* avoid REGPARM breakage on x86: */ 122 /* avoid REGPARM breakage on x86: */
@@ -123,7 +124,7 @@ asmlinkage long sys_setfsuid16(old_uid_t uid)
123 return ret; 124 return ret;
124} 125}
125 126
126asmlinkage long sys_setfsgid16(old_gid_t gid) 127SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid)
127{ 128{
128 long ret = sys_setfsgid(low2highgid(gid)); 129 long ret = sys_setfsgid(low2highgid(gid));
129 /* avoid REGPARM breakage on x86: */ 130 /* avoid REGPARM breakage on x86: */
@@ -161,7 +162,7 @@ static int groups16_from_user(struct group_info *group_info,
161 return 0; 162 return 0;
162} 163}
163 164
164asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) 165SYSCALL_DEFINE2(getgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
165{ 166{
166 const struct cred *cred = current_cred(); 167 const struct cred *cred = current_cred();
167 int i; 168 int i;
@@ -184,7 +185,7 @@ out:
184 return i; 185 return i;
185} 186}
186 187
187asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) 188SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
188{ 189{
189 struct group_info *group_info; 190 struct group_info *group_info;
190 int retval; 191 int retval;
@@ -209,22 +210,22 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist)
209 return retval; 210 return retval;
210} 211}
211 212
212asmlinkage long sys_getuid16(void) 213SYSCALL_DEFINE0(getuid16)
213{ 214{
214 return high2lowuid(current_uid()); 215 return high2lowuid(current_uid());
215} 216}
216 217
217asmlinkage long sys_geteuid16(void) 218SYSCALL_DEFINE0(geteuid16)
218{ 219{
219 return high2lowuid(current_euid()); 220 return high2lowuid(current_euid());
220} 221}
221 222
222asmlinkage long sys_getgid16(void) 223SYSCALL_DEFINE0(getgid16)
223{ 224{
224 return high2lowgid(current_gid()); 225 return high2lowgid(current_gid());
225} 226}
226 227
227asmlinkage long sys_getegid16(void) 228SYSCALL_DEFINE0(getegid16)
228{ 229{
229 return high2lowgid(current_egid()); 230 return high2lowgid(current_egid());
230} 231}
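
Every conversion in this file funnels through the low2high/high2low helpers, whose job is to preserve the -1 "leave unchanged" sentinel when widening and to substitute overflowuid when a modern id does not fit in 16 bits. A sketch of the semantics (cf. include/linux/highuid.h; written as functions here, the kernel uses macros):

    typedef unsigned short old_uid_t;
    typedef unsigned int   uid_t;

    extern int overflowuid;  /* sysctl-tunable fallback, usually 65534 */

    static uid_t low2highuid(old_uid_t uid)
    {
            /* -1 means "leave unchanged" to setreuid() and friends; it
             * must stay -1 after widening, not become 0x0000ffff. */
            return uid == (old_uid_t)-1 ? (uid_t)-1 : (uid_t)uid;
    }

    static old_uid_t high2lowuid(uid_t uid)
    {
            /* ids above 65535 cannot be represented for 16-bit callers;
             * report overflowuid rather than silently truncating. */
            return (uid & ~0xffffU) ? (old_uid_t)overflowuid
                                    : (old_uid_t)uid;
    }
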
diff --git a/kernel/wait.c b/kernel/wait.c
index cd87131f2fc2..42a2dbc181c8 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -91,6 +91,15 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
91} 91}
92EXPORT_SYMBOL(prepare_to_wait_exclusive); 92EXPORT_SYMBOL(prepare_to_wait_exclusive);
93 93
94/*
95 * finish_wait - clean up after waiting in a queue
96 * @q: waitqueue waited on
97 * @wait: wait descriptor
98 *
99 * Sets current thread back to running state and removes
100 * the wait descriptor from the given waitqueue if still
101 * queued.
102 */
94void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) 103void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
95{ 104{
96 unsigned long flags; 105 unsigned long flags;
@@ -117,6 +126,39 @@ void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
117} 126}
118EXPORT_SYMBOL(finish_wait); 127EXPORT_SYMBOL(finish_wait);
119 128
129/*
130 * abort_exclusive_wait - abort exclusive waiting in a queue
131 * @q: waitqueue waited on
132 * @wait: wait descriptor
133 * @state: runstate of the waiter to be woken
134 * @key: key to identify a wait bit queue or %NULL
135 *
136 * Sets current thread back to running state and removes
137 * the wait descriptor from the given waitqueue if still
138 * queued.
139 *
140 * Wakes up the next waiter if the caller is concurrently
141 * woken up through the queue.
142 *
143 * This prevents waiter starvation where an exclusive waiter
144 * aborts and is woken up concurrently and no one wakes up
145 * the next waiter.
146 */
147void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
148 unsigned int mode, void *key)
149{
150 unsigned long flags;
151
152 __set_current_state(TASK_RUNNING);
153 spin_lock_irqsave(&q->lock, flags);
154 if (!list_empty(&wait->task_list))
155 list_del_init(&wait->task_list);
156 else if (waitqueue_active(q))
157 __wake_up_common(q, mode, 1, 0, key);
158 spin_unlock_irqrestore(&q->lock, flags);
159}
160EXPORT_SYMBOL(abort_exclusive_wait);
161
120int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) 162int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
121{ 163{
122 int ret = default_wake_function(wait, mode, sync, key); 164 int ret = default_wake_function(wait, mode, sync, key);
@@ -177,17 +219,20 @@ int __sched
177__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, 219__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
178 int (*action)(void *), unsigned mode) 220 int (*action)(void *), unsigned mode)
179{ 221{
180 int ret = 0;
181
182 do { 222 do {
223 int ret;
224
183 prepare_to_wait_exclusive(wq, &q->wait, mode); 225 prepare_to_wait_exclusive(wq, &q->wait, mode);
184 if (test_bit(q->key.bit_nr, q->key.flags)) { 226 if (!test_bit(q->key.bit_nr, q->key.flags))
185 if ((ret = (*action)(q->key.flags))) 227 continue;
186 break; 228 ret = action(q->key.flags);
187 } 229 if (!ret)
230 continue;
231 abort_exclusive_wait(wq, &q->wait, mode, &q->key);
232 return ret;
188 } while (test_and_set_bit(q->key.bit_nr, q->key.flags)); 233 } while (test_and_set_bit(q->key.bit_nr, q->key.flags));
189 finish_wait(wq, &q->wait); 234 finish_wait(wq, &q->wait);
190 return ret; 235 return 0;
191} 236}
192EXPORT_SYMBOL(__wait_on_bit_lock); 237EXPORT_SYMBOL(__wait_on_bit_lock);
193 238
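
The rewritten __wait_on_bit_lock() closes a lost-wakeup window specific to exclusive waiters: the unlocker wakes exactly one of them, and if that one happens to be aborting because action() failed (typically -EINTR on a signal), nobody would ever wake the rest. abort_exclusive_wait() passes the wakeup along instead of consuming it. Timeline of the race, then the caller shape (my_bit_wait() is a hypothetical action):

    /*
     *  waiter A (exclusive)        waiter B (exclusive)     lock holder
     *  prepare_to_wait_exclusive() prepare_to_wait_exclusive()
     *  action() returns -EINTR                              clears bit,
     *                                                       wakes one: A
     *  A is bailing out. Plain finish_wait() would consume the
     *  wakeup and leave B asleep forever; abort_exclusive_wait()
     *  notices A was already removed from the queue by the wakeup
     *  and re-targets it at B via __wake_up_common().
     */

    static int my_bit_wait(void *word)      /* hypothetical action */
    {
            if (signal_pending(current))
                    return -EINTR;
            schedule();
            return 0;
    }

    /* err = wait_on_bit_lock(&obj->flags, MY_BIT_NR,
     *                        my_bit_wait, TASK_INTERRUPTIBLE); */
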
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2f445833ae37..1f0c509b40d3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -971,6 +971,8 @@ undo:
971} 971}
972 972
973#ifdef CONFIG_SMP 973#ifdef CONFIG_SMP
974static struct workqueue_struct *work_on_cpu_wq __read_mostly;
975
974struct work_for_cpu { 976struct work_for_cpu {
975 struct work_struct work; 977 struct work_struct work;
976 long (*fn)(void *); 978 long (*fn)(void *);
@@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w)
991 * @fn: the function to run 993 * @fn: the function to run
992 * @arg: the function arg 994 * @arg: the function arg
993 * 995 *
994 * This will return -EINVAL if the cpu is not online, or the return value 996 * This will return the value @fn returns.
995 * of @fn otherwise. 997 * It is up to the caller to ensure that the cpu doesn't go offline.
996 */ 998 */
997long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 999long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
998{ 1000{
@@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
1001 INIT_WORK(&wfc.work, do_work_for_cpu); 1003 INIT_WORK(&wfc.work, do_work_for_cpu);
1002 wfc.fn = fn; 1004 wfc.fn = fn;
1003 wfc.arg = arg; 1005 wfc.arg = arg;
1004 get_online_cpus(); 1006 queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
1005 if (unlikely(!cpu_online(cpu))) 1007 flush_work(&wfc.work);
1006 wfc.ret = -EINVAL;
1007 else {
1008 schedule_work_on(cpu, &wfc.work);
1009 flush_work(&wfc.work);
1010 }
1011 put_online_cpus();
1012 1008
1013 return wfc.ret; 1009 return wfc.ret;
1014} 1010}
@@ -1025,4 +1021,8 @@ void __init init_workqueues(void)
1025 hotcpu_notifier(workqueue_cpu_callback, 0); 1021 hotcpu_notifier(workqueue_cpu_callback, 0);
1026 keventd_wq = create_workqueue("events"); 1022 keventd_wq = create_workqueue("events");
1027 BUG_ON(!keventd_wq); 1023 BUG_ON(!keventd_wq);
1024#ifdef CONFIG_SMP
1025 work_on_cpu_wq = create_workqueue("work_on_cpu");
1026 BUG_ON(!work_on_cpu_wq);
1027#endif
1028} 1028}
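
With this change work_on_cpu() runs on a dedicated "work_on_cpu" workqueue rather than keventd, which avoids entangling it with unrelated queued work, and the cpu-online check becomes the caller's responsibility, as the updated kerneldoc says. A hedged usage sketch under the new contract (read_something_on_cpu() is a placeholder):

    static long read_something_on_cpu(void *arg)    /* placeholder fn */
    {
            /* runs with smp_processor_id() == the requested cpu */
            return 0;
    }

    static long call_on(unsigned int target_cpu)
    {
            long ret = -EINVAL;

            get_online_cpus();              /* caller now pins the cpu */
            if (cpu_online(target_cpu))
                    ret = work_on_cpu(target_cpu,
                                      read_something_on_cpu, NULL);
            put_online_cpus();
            return ret;
    }
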