Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                    |  5
-rw-r--r--  kernel/acct.c                      |  2
-rw-r--r--  kernel/capability.c                |  4
-rw-r--r--  kernel/cgroup.c                    | 28
-rw-r--r--  kernel/cpuset.c                    |  2
-rw-r--r--  kernel/dma-coherent.c              | 47
-rw-r--r--  kernel/exec_domain.c               |  3
-rw-r--r--  kernel/exit.c                      | 17
-rw-r--r--  kernel/fork.c                      | 20
-rw-r--r--  kernel/futex.c                     | 17
-rw-r--r--  kernel/hrtimer.c                   | 49
-rw-r--r--  kernel/irq/handle.c                | 16
-rw-r--r--  kernel/irq/manage.c                | 10
-rw-r--r--  kernel/itimer.c                    |  7
-rw-r--r--  kernel/kallsyms.c                  | 16
-rw-r--r--  kernel/kexec.c                     |  5
-rw-r--r--  kernel/kprobes.c                   |  2
-rw-r--r--  kernel/module.c                    | 10
-rw-r--r--  kernel/posix-cpu-timers.c          | 70
-rw-r--r--  kernel/posix-timers.c              | 43
-rw-r--r--  kernel/power/Makefile              |  3
-rw-r--r--  kernel/power/disk.c                | 10
-rw-r--r--  kernel/printk.c                    |  7
-rw-r--r--  kernel/ptrace.c                    |  2
-rw-r--r--  kernel/rcuclassic.c                |  2
-rw-r--r--  kernel/rcutree.c                   |  2
-rw-r--r--  kernel/relay.c                     |  4
-rw-r--r--  kernel/resource.c                  |  1
-rw-r--r--  kernel/sched.c                     | 43
-rw-r--r--  kernel/sched_fair.c                | 37
-rw-r--r--  kernel/sched_stats.h               | 33
-rw-r--r--  kernel/signal.c                    | 59
-rw-r--r--  kernel/softlockup.c                |  9
-rw-r--r--  kernel/sys.c                       | 70
-rw-r--r--  kernel/sys_ni.c                    |  1
-rw-r--r--  kernel/sysctl.c                    | 15
-rw-r--r--  kernel/time.c                      | 14
-rw-r--r--  kernel/time/tick-common.c          | 26
-rw-r--r--  kernel/time/tick-sched.c           |  2
-rw-r--r--  kernel/timer.c                     | 18
-rw-r--r--  kernel/trace/ftrace.c              | 27
-rw-r--r--  kernel/trace/ring_buffer.c         | 15
-rw-r--r--  kernel/trace/trace.c               |  5
-rw-r--r--  kernel/trace/trace_irqsoff.c       |  1
-rw-r--r--  kernel/trace/trace_sched_wakeup.c  |  1
-rw-r--r--  kernel/uid16.c                     | 39
-rw-r--r--  kernel/workqueue.c                 | 20
47 files changed, 452 insertions(+), 387 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 2aebc4cd7878..170a9213c1b6 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,9 +40,8 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
-obj-y += smp.o
-else
+obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifneq ($(CONFIG_SMP),y)
 obj-y += up.o
 endif
 obj-$(CONFIG_SMP) += spinlock.o
diff --git a/kernel/acct.c b/kernel/acct.c
index d57b7cbb98b6..7afa31564162 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -277,7 +277,7 @@ static int acct_on(char *name)
  * should be written. If the filename is NULL, accounting will be
  * shutdown.
  */
-asmlinkage long sys_acct(const char __user *name)
+SYSCALL_DEFINE1(acct, const char __user *, name)
 {
 	int error;
 
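
Context for the SYSCALL_DEFINEn() conversions throughout this diff: on most architectures the macro expands straight back to the old asmlinkage definition, while 64-bit architectures with CONFIG_HAVE_SYSCALL_WRAPPERS get a stub that receives every argument as a long and casts it down, so a 32-bit value arriving with stale upper register bits is explicitly narrowed. A self-contained userspace sketch of that hazard (names and values are illustrative, not kernel code; assumes a 64-bit long):

#include <stdio.h>

/* stand-in for a SyS_exit(long)-style wrapper narrowing to the real int arg */
static long handler(long arg)
{
	int error_code = (int)arg;	/* the wrapper's cast makes this explicit */
	return (error_code & 0xff) << 8;
}

int main(void)
{
	/* upper half full of register garbage, as untrusted input might be */
	long raw = (long)0xdeadbeef00000042ULL;
	printf("%ld\n", handler(raw));	/* prints 16896, i.e. 0x42 << 8 */
	return 0;
}
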
diff --git a/kernel/capability.c b/kernel/capability.c
index 688926e496be..4e17041963f5 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -161,7 +161,7 @@ static inline int cap_get_target_pid(pid_t pid, kernel_cap_t *pEp,
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
+SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
 {
 	int ret = 0;
 	pid_t pid;
@@ -235,7 +235,7 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
  *
  * Returns 0 on success and < 0 on error.
  */
-asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
+SYSCALL_DEFINE2(capset, cap_user_header_t, header, const cap_user_data_t, data)
 {
 	struct __user_cap_data_struct kdata[_KERNEL_CAPABILITY_U32S];
 	unsigned i, tocopy;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c29831076e7a..5a54ff42874e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1115,8 +1115,10 @@ static void cgroup_kill_sb(struct super_block *sb) {
 	}
 	write_unlock(&css_set_lock);
 
-	list_del(&root->root_list);
-	root_count--;
+	if (!list_empty(&root->root_list)) {
+		list_del(&root->root_list);
+		root_count--;
+	}
 
 	mutex_unlock(&cgroup_mutex);
 
@@ -2434,7 +2436,9 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 err_remove:
 
+	cgroup_lock_hierarchy(root);
 	list_del(&cgrp->sibling);
+	cgroup_unlock_hierarchy(root);
 	root->number_of_cgroups--;
 
 err_destroy:
@@ -2507,7 +2511,7 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
 	for_each_subsys(cgrp->root, ss) {
 		struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
 		int refcnt;
-		do {
+		while (1) {
 			/* We can only remove a CSS with a refcnt==1 */
 			refcnt = atomic_read(&css->refcnt);
 			if (refcnt > 1) {
@@ -2521,7 +2525,10 @@ static int cgroup_clear_css_refs(struct cgroup *cgrp)
 			 * css_tryget() to spin until we set the
 			 * CSS_REMOVED bits or abort
 			 */
-		} while (atomic_cmpxchg(&css->refcnt, refcnt, 0) != refcnt);
+			if (atomic_cmpxchg(&css->refcnt, refcnt, 0) == refcnt)
+				break;
+			cpu_relax();
+		}
 	}
 done:
 	for_each_subsys(cgrp->root, ss) {
@@ -2991,20 +2998,21 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
-	task_lock(tsk);
-	cg = tsk->cgroups;
-	parent = task_cgroup(tsk, subsys->subsys_id);
 
 	/* Pin the hierarchy */
-	if (!atomic_inc_not_zero(&parent->root->sb->s_active)) {
+	if (!atomic_inc_not_zero(&root->sb->s_active)) {
 		/* We race with the final deactivate_super() */
 		mutex_unlock(&cgroup_mutex);
 		return 0;
 	}
 
 	/* Keep the cgroup alive */
+	task_lock(tsk);
+	parent = task_cgroup(tsk, subsys->subsys_id);
+	cg = tsk->cgroups;
 	get_css_set(cg);
 	task_unlock(tsk);
+
 	mutex_unlock(&cgroup_mutex);
 
 	/* Now do the VFS work to create a cgroup */
@@ -3043,7 +3051,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 	mutex_unlock(&inode->i_mutex);
 	put_css_set(cg);
 
-	deactivate_super(parent->root->sb);
+	deactivate_super(root->sb);
 	/* The cgroup is still accessible in the VFS, but
 	 * we're not going to try to rmdir() it at this
 	 * point. */
@@ -3069,7 +3077,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
 	mutex_lock(&cgroup_mutex);
 	put_css_set(cg);
 	mutex_unlock(&cgroup_mutex);
-	deactivate_super(parent->root->sb);
+	deactivate_super(root->sb);
 	return ret;
 }
 
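
The open-coded loop that replaces the do/while in cgroup_clear_css_refs() is the classic compare-and-swap retry pattern, now with an explicit cpu_relax() so a spinning CPU yields pipeline resources while a racing css_tryget() finishes. A self-contained userspace analogue of the control flow (GCC's __sync builtin stands in for atomic_cmpxchg(); single-threaded here purely for illustration):

#include <stdio.h>

static int refcnt = 1;

static int try_drop_to_zero(void)
{
	while (1) {
		int old = refcnt;			/* snapshot */
		if (old > 1)
			return 0;			/* busy: abort */
		if (__sync_val_compare_and_swap(&refcnt, old, 0) == old)
			return 1;			/* won the race */
		/* lost a race; a real caller would cpu_relax() here */
	}
}

int main(void)
{
	printf("%d\n", try_drop_to_zero());	/* prints 1 */
	return 0;
}
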
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 647c77a88fcb..a85678865c5e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -568,7 +568,7 @@ update_domain_attr_tree(struct sched_domain_attr *dattr, struct cpuset *c)
  * load balancing domains (sched domains) as specified by that partial
  * partition.
  *
- * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
  * for a background explanation of this.
  *
  * Does not return errors, on the theory that the callers of this
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index 038707404b76..962a3b574f21 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
  * @size: size of requested memory area
  * @dma_handle: This will be filled with the correct dma handle
  * @ret: This pointer will be filled with the virtual address
- *	to allocated area.
+ *		to allocated area.
  *
  * This function should be only called from per-arch dma_alloc_coherent()
  * to support allocation from per-device coherent memory pools.
@@ -118,31 +118,32 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	mem = dev->dma_mem;
 	if (!mem)
 		return 0;
-	if (unlikely(size > mem->size))
-		return 0;
+
+	*ret = NULL;
+
+	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+		goto err;
 
 	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
-	if (pageno >= 0) {
-		/*
-		 * Memory was found in the per-device arena.
-		 */
-		*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
-		*ret = mem->virt_base + (pageno << PAGE_SHIFT);
-		memset(*ret, 0, size);
-	} else if (mem->flags & DMA_MEMORY_EXCLUSIVE) {
-		/*
-		 * The per-device arena is exhausted and we are not
-		 * permitted to fall back to generic memory.
-		 */
-		*ret = NULL;
-	} else {
-		/*
-		 * The per-device arena is exhausted and we are
-		 * permitted to fall back to generic memory.
-		 */
-		return 0;
-	}
+	if (unlikely(pageno < 0))
+		goto err;
+
+	/*
+	 * Memory was found in the per-device area.
+	 */
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	memset(*ret, 0, size);
+
 	return 1;
+
+err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
+	return mem->flags & DMA_MEMORY_EXCLUSIVE;
 }
 EXPORT_SYMBOL(dma_alloc_from_coherent);
 
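
After this rework the function has a two-level contract: the return value says whether the per-device pool handled the request at all, and *ret (now pre-cleared to NULL) carries the result when it did. The size check is also fixed: mem->size counts pages, so the byte count must be compared against mem->size << PAGE_SHIFT. A hedged sketch of the caller side, modeled on per-arch dma_alloc_coherent() implementations of the era (names are illustrative):

	void *vaddr;
	dma_addr_t handle;

	if (dma_alloc_from_coherent(dev, size, &handle, &vaddr))
		return vaddr;	/* pool satisfied it, or NULL if exhausted
				 * and DMA_MEMORY_EXCLUSIVE forbids fallback */
	/* returned 0: fall back to the generic allocator */
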
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 0511716e9424..667c841c2952 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -209,8 +209,7 @@ static int __init proc_execdomains_init(void)
 module_init(proc_execdomains_init);
 #endif
 
-asmlinkage long
-sys_personality(u_long personality)
+SYSCALL_DEFINE1(personality, u_long, personality)
 {
 	u_long old = current->personality;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index c7740fa3252c..f80dec3f1875 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1141,7 +1141,7 @@ NORET_TYPE void complete_and_exit(struct completion *comp, long code)
 
 EXPORT_SYMBOL(complete_and_exit);
 
-asmlinkage long sys_exit(int error_code)
+SYSCALL_DEFINE1(exit, int, error_code)
 {
 	do_exit((error_code&0xff)<<8);
 }
@@ -1182,9 +1182,11 @@ do_group_exit(int exit_code)
  * wait4()-ing process will get the correct exit code - even if this
  * thread is not the thread group leader.
  */
-asmlinkage void sys_exit_group(int error_code)
+SYSCALL_DEFINE1(exit_group, int, error_code)
 {
 	do_group_exit((error_code & 0xff) << 8);
+	/* NOTREACHED */
+	return 0;
 }
 
 static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
@@ -1752,9 +1754,8 @@ end:
 	return retval;
 }
 
-asmlinkage long sys_waitid(int which, pid_t upid,
-			   struct siginfo __user *infop, int options,
-			   struct rusage __user *ru)
+SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
+		infop, int, options, struct rusage __user *, ru)
 {
 	struct pid *pid = NULL;
 	enum pid_type type;
@@ -1793,8 +1794,8 @@ asmlinkage long sys_waitid(int which, pid_t upid,
 	return ret;
 }
 
-asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
-			  int options, struct rusage __user *ru)
+SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
+		int, options, struct rusage __user *, ru)
 {
 	struct pid *pid = NULL;
 	enum pid_type type;
@@ -1831,7 +1832,7 @@ asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
  * sys_waitpid() remains for compatibility. waitpid() should be
  * implemented by calling sys_wait4() from libc.a.
  */
-asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
+SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
 {
 	return sys_wait4(pid, stat_addr, options, NULL);
 }
diff --git a/kernel/fork.c b/kernel/fork.c
index 1d68f1255dd8..242a706e7721 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -817,17 +817,17 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
-	int ret;
 
 	if (clone_flags & CLONE_THREAD) {
-		ret = thread_group_cputime_clone_thread(current);
-		if (likely(!ret)) {
-			atomic_inc(&current->signal->count);
-			atomic_inc(&current->signal->live);
-		}
-		return ret;
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
+		return 0;
 	}
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+
+	if (sig)
+		posix_cpu_timers_init_group(sig);
+
 	tsk->signal = sig;
 	if (!sig)
 		return -ENOMEM;
@@ -864,8 +864,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
 	task_unlock(current->group_leader);
 
-	posix_cpu_timers_init_group(sig);
-
 	acct_init_pacct(&sig->pacct);
 
 	tty_audit_fork(sig);
@@ -901,7 +899,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	clear_freeze_flag(p);
 }
 
-asmlinkage long sys_set_tid_address(int __user *tidptr)
+SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 {
 	current->clear_child_tid = tidptr;
 
@@ -1603,7 +1601,7 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
  * constructed. Here we are modifying the current, active,
  * task_struct.
  */
-asmlinkage long sys_unshare(unsigned long unshare_flags)
+SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 {
 	int err = 0;
 	struct fs_struct *fs, *new_fs = NULL;
diff --git a/kernel/futex.c b/kernel/futex.c
index 002aa189eb09..f89d373a9c6d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1733,9 +1733,8 @@ pi_faulted:
  * @head: pointer to the list-head
  * @len: length of the list-head, as userspace expects
  */
-asmlinkage long
-sys_set_robust_list(struct robust_list_head __user *head,
-		    size_t len)
+SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
+		size_t, len)
 {
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
@@ -1756,9 +1755,9 @@ sys_set_robust_list(struct robust_list_head __user *head,
  * @head_ptr: pointer to a list-head pointer, the kernel fills it in
  * @len_ptr: pointer to a length field, the kernel fills in the header size
  */
-asmlinkage long
-sys_get_robust_list(int pid, struct robust_list_head __user * __user *head_ptr,
-		    size_t __user *len_ptr)
+SYSCALL_DEFINE3(get_robust_list, int, pid,
+		struct robust_list_head __user * __user *, head_ptr,
+		size_t __user *, len_ptr)
 {
 	struct robust_list_head __user *head;
 	unsigned long ret;
@@ -1978,9 +1977,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 }
 
 
-asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
-			  struct timespec __user *utime, u32 __user *uaddr2,
-			  u32 val3)
+SYSCALL_DEFINE6(futex, u32 __user *, uaddr, int, op, u32, val,
+		struct timespec __user *, utime, u32 __user *, uaddr2,
+		u32, val3)
 {
 	struct timespec ts;
 	ktime_t t, *tp = NULL;
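
sys_futex uses the six-argument form, the maximum SYSCALL_DEFINEx supports. Since libc exposes no futex() wrapper, userspace reaches it through syscall(2) with the same argument order; a minimal runnable sketch (FUTEX_WAIT with a mismatched value, so it returns EAGAIN at once instead of blocking):

#define _GNU_SOURCE
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

static long futex_wait(int *uaddr, int val)
{
	/* blocks while *uaddr == val; NULL timeout waits forever */
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, val, NULL, NULL, 0);
}

int main(void)
{
	int x = 1;

	if (futex_wait(&x, 0) == -1)	/* *uaddr (1) != val (0) -> EAGAIN */
		perror("futex_wait");
	return 0;
}
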
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1455b7651b6b..f394d2a42ca3 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -501,6 +501,13 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 			continue;
 		timer = rb_entry(base->first, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		/*
+		 * clock_was_set() has changed base->offset so the
+		 * result might be negative. Fix it up to prevent a
+		 * false positive in clockevents_program_event()
+		 */
+		if (expires.tv64 < 0)
+			expires.tv64 = 0;
 		if (expires.tv64 < cpu_base->expires_next.tv64)
 			cpu_base->expires_next = expires;
 	}
@@ -614,7 +621,9 @@ void clock_was_set(void)
  */
 void hres_timers_resume(void)
 {
-	/* Retrigger the CPU local events: */
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
+
 	retrigger_next_event(NULL);
 }
 
@@ -1156,6 +1165,29 @@ static void __run_hrtimer(struct hrtimer *timer)
 
 #ifdef CONFIG_HIGH_RES_TIMERS
 
+static int force_clock_reprogram;
+
+/*
+ * After 5 iteration's attempts, we consider that hrtimer_interrupt()
+ * is hanging, which could happen with something that slows the interrupt
+ * such as the tracing. Then we force the clock reprogramming for each future
+ * hrtimer interrupts to avoid infinite loops and use the min_delta_ns
+ * threshold that we will overwrite.
+ * The next tick event will be scheduled to 3 times we currently spend on
+ * hrtimer_interrupt(). This gives a good compromise, the cpus will spend
+ * 1/4 of their time to process the hrtimer interrupts. This is enough to
+ * let it running without serious starvation.
+ */
+
+static inline void
+hrtimer_interrupt_hanging(struct clock_event_device *dev,
+			ktime_t try_time)
+{
+	force_clock_reprogram = 1;
+	dev->min_delta_ns = (unsigned long)try_time.tv64 * 3;
+	printk(KERN_WARNING "hrtimer: interrupt too slow, "
+		"forcing clock min delta to %lu ns\n", dev->min_delta_ns);
+}
 /*
  * High resolution timer interrupt
  * Called with interrupts disabled
@@ -1165,6 +1197,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
+	int nr_retries = 0;
 	int i;
 
 	BUG_ON(!cpu_base->hres_active);
@@ -1172,6 +1205,10 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	dev->next_event.tv64 = KTIME_MAX;
 
  retry:
+	/* 5 retries is enough to notice a hang */
+	if (!(++nr_retries % 5))
+		hrtimer_interrupt_hanging(dev, ktime_sub(ktime_get(), now));
+
 	now = ktime_get();
 
 	expires_next.tv64 = KTIME_MAX;
@@ -1224,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 != KTIME_MAX) {
-		if (tick_program_event(expires_next, 0))
+		if (tick_program_event(expires_next, force_clock_reprogram))
 			goto retry;
 	}
 }
@@ -1467,8 +1504,8 @@ out:
 	return ret;
 }
 
-asmlinkage long
-sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
+SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec tu;
 
@@ -1578,6 +1615,10 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
+	case CPU_DYING:
+	case CPU_DYING_FROZEN:
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 	{
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index c20db0be9173..3aba8d12f328 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -39,6 +39,18 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 	ack_bad_irq(irq);
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
+static void __init init_irq_default_affinity(void)
+{
+	alloc_bootmem_cpumask_var(&irq_default_affinity);
+	cpumask_setall(irq_default_affinity);
+}
+#else
+static void __init init_irq_default_affinity(void)
+{
+}
+#endif
+
 /*
  * Linux has a controller-independent interrupt architecture.
  * Every controller has a 'controller-template', that is used
@@ -134,6 +146,8 @@ int __init early_irq_init(void)
 	int legacy_count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
@@ -219,6 +233,8 @@ int __init early_irq_init(void)
 	int count;
 	int i;
 
+	init_irq_default_affinity();
+
 	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index cd0cd8dcb345..291f03664552 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -15,17 +15,9 @@
 
 #include "internals.h"
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 cpumask_var_t irq_default_affinity;
 
-static int init_irq_default_affinity(void)
-{
-	alloc_cpumask_var(&irq_default_affinity, GFP_KERNEL);
-	cpumask_setall(irq_default_affinity);
-	return 0;
-}
-core_initcall(init_irq_default_affinity);
-
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
diff --git a/kernel/itimer.c b/kernel/itimer.c
index db7c358b9a02..6a5fe93dd8bd 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -100,7 +100,7 @@ int do_getitimer(int which, struct itimerval *value)
 	return 0;
 }
 
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
+SYSCALL_DEFINE2(getitimer, int, which, struct itimerval __user *, value)
 {
 	int error = -EFAULT;
 	struct itimerval get_buffer;
@@ -260,9 +260,8 @@ unsigned int alarm_setitimer(unsigned int seconds)
 	return it_old.it_value.tv_sec;
 }
 
-asmlinkage long sys_setitimer(int which,
-			      struct itimerval __user *value,
-			      struct itimerval __user *ovalue)
+SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
+		struct itimerval __user *, ovalue)
 {
 	struct itimerval set_buffer, get_buffer;
 	int error;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index e694afa0eb8c..7b8b0f21a5b1 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -30,19 +30,20 @@
 #define all_var 0
 #endif
 
-extern const unsigned long kallsyms_addresses[];
-extern const u8 kallsyms_names[];
+/* These will be re-linked against their real values during the second link stage */
+extern const unsigned long kallsyms_addresses[] __attribute__((weak));
+extern const u8 kallsyms_names[] __attribute__((weak));
 
 /* tell the compiler that the count isn't in the small data section if the arch
  * has one (eg: FRV)
  */
 extern const unsigned long kallsyms_num_syms
-		__attribute__((__section__(".rodata")));
+__attribute__((weak, section(".rodata")));
 
-extern const u8 kallsyms_token_table[];
-extern const u16 kallsyms_token_index[];
+extern const u8 kallsyms_token_table[] __attribute__((weak));
+extern const u16 kallsyms_token_index[] __attribute__((weak));
 
-extern const unsigned long kallsyms_markers[];
+extern const unsigned long kallsyms_markers[] __attribute__((weak));
 
 static inline int is_kernel_inittext(unsigned long addr)
 {
@@ -167,6 +168,9 @@ static unsigned long get_symbol_pos(unsigned long addr,
 	unsigned long symbol_start = 0, symbol_end = 0;
 	unsigned long i, low, high, mid;
 
+	/* This kernel should never had been booted. */
+	BUG_ON(!kallsyms_addresses);
+
 	/* do a binary search on the sorted kallsyms_addresses array */
 	low = 0;
 	high = kallsyms_num_syms;
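
The weak attributes matter because an object declared weak and never defined resolves to address 0 at link time; that is what lets the first-stage kernel link succeed before the real kallsyms tables exist, and exactly what the new BUG_ON(!kallsyms_addresses) tests at runtime. Minimal illustration with a hypothetical symbol:

extern const unsigned long maybe_table[] __attribute__((weak));

int have_table(void)
{
	/* 0 (false) in any binary where nothing defines maybe_table */
	return maybe_table != 0;
}
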
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 3fb855ad6aa0..8a6d7b08864e 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -934,9 +934,8 @@ struct kimage *kexec_crash_image;
 
 static DEFINE_MUTEX(kexec_mutex);
 
-asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
-			       struct kexec_segment __user *segments,
-			       unsigned long flags)
+SYSCALL_DEFINE4(kexec_load, unsigned long, entry, unsigned long, nr_segments,
+		struct kexec_segment __user *, segments, unsigned long, flags)
 {
 	struct kimage **dest_image, *image;
 	int result;
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b9cbdc0127a..7ba8cd9845cb 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -123,7 +123,7 @@ static int collect_garbage_slots(void);
 static int __kprobes check_safety(void)
 {
 	int ret = 0;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
 	ret = freeze_processes();
 	if (ret == 0) {
 		struct task_struct *p, *q;
diff --git a/kernel/module.c b/kernel/module.c
index c9332c90d5a0..e8b51d41dd72 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -743,8 +743,8 @@ static void wait_for_zero_refcount(struct module *mod)
 	mutex_lock(&module_mutex);
 }
 
-asmlinkage long
-sys_delete_module(const char __user *name_user, unsigned int flags)
+SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
+		unsigned int, flags)
 {
 	struct module *mod;
 	char name[MODULE_NAME_LEN];
@@ -2296,10 +2296,8 @@ static noinline struct module *load_module(void __user *umod,
 }
 
 /* This is where the real work happens */
-asmlinkage long
-sys_init_module(void __user *umod,
-		unsigned long len,
-		const char __user *uargs)
+SYSCALL_DEFINE3(init_module, void __user *, umod,
+		unsigned long, len, const char __user *, uargs)
 {
 	struct module *mod;
 	int ret = 0;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 157de3a47832..fa07da94d7be 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -10,76 +10,6 @@
 #include <linux/kernel_stat.h>
 
 /*
- * Allocate the thread_group_cputime structure appropriately and fill in the
- * current values of the fields. Called from copy_signal() via
- * thread_group_cputime_clone_thread() when adding a second or subsequent
- * thread to a thread group. Assumes interrupts are enabled when called.
- */
-int thread_group_cputime_alloc(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-	struct task_cputime *cputime;
-
-	/*
-	 * If we have multiple threads and we don't already have a
-	 * per-CPU task_cputime struct (checked in the caller), allocate
-	 * one and fill it in with the times accumulated so far. We may
-	 * race with another thread so recheck after we pick up the sighand
-	 * lock.
-	 */
-	cputime = alloc_percpu(struct task_cputime);
-	if (cputime == NULL)
-		return -ENOMEM;
-	spin_lock_irq(&tsk->sighand->siglock);
-	if (sig->cputime.totals) {
-		spin_unlock_irq(&tsk->sighand->siglock);
-		free_percpu(cputime);
-		return 0;
-	}
-	sig->cputime.totals = cputime;
-	cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
-	cputime->utime = tsk->utime;
-	cputime->stime = tsk->stime;
-	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
-	spin_unlock_irq(&tsk->sighand->siglock);
-	return 0;
-}
-
-/**
- * thread_group_cputime - Sum the thread group time fields across all CPUs.
- *
- * @tsk:	The task we use to identify the thread group.
- * @times:	task_cputime structure in which we return the summed fields.
- *
- * Walk the list of CPUs to sum the per-CPU time fields in the thread group
- * time structure.
- */
-void thread_group_cputime(
-	struct task_struct *tsk,
-	struct task_cputime *times)
-{
-	struct task_cputime *totals, *tot;
-	int i;
-
-	totals = tsk->signal->cputime.totals;
-	if (!totals) {
-		times->utime = tsk->utime;
-		times->stime = tsk->stime;
-		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
-		return;
-	}
-
-	times->stime = times->utime = cputime_zero;
-	times->sum_exec_runtime = 0;
-	for_each_possible_cpu(i) {
-		tot = per_cpu_ptr(totals, i);
-		times->utime = cputime_add(times->utime, tot->utime);
-		times->stime = cputime_add(times->stime, tot->stime);
-		times->sum_exec_runtime += tot->sum_exec_runtime;
-	}
-}
-
-/*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 887c63787de6..052ec4d195c7 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -477,10 +477,9 @@ static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
 
 /* Create a POSIX.1b interval timer. */
 
-asmlinkage long
-sys_timer_create(const clockid_t which_clock,
-		 struct sigevent __user *timer_event_spec,
-		 timer_t __user * created_timer_id)
+SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
+		struct sigevent __user *, timer_event_spec,
+		timer_t __user *, created_timer_id)
 {
 	struct k_itimer *new_timer;
 	int error, new_timer_id;
@@ -661,8 +660,8 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 }
 
 /* Get the time remaining on a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
+SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
+		struct itimerspec __user *, setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec cur_setting;
@@ -691,8 +690,7 @@ sys_timer_gettime(timer_t timer_id, struct itimerspec __user *setting)
  * the call back to do_schedule_next_timer(). So all we need to do is
  * to pick up the frozen overrun.
  */
-asmlinkage long
-sys_timer_getoverrun(timer_t timer_id)
+SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
 {
 	struct k_itimer *timr;
 	int overrun;
@@ -760,10 +758,9 @@ common_timer_set(struct k_itimer *timr, int flags,
 }
 
 /* Set a POSIX.1b interval timer */
-asmlinkage long
-sys_timer_settime(timer_t timer_id, int flags,
-		  const struct itimerspec __user *new_setting,
-		  struct itimerspec __user *old_setting)
+SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
+		const struct itimerspec __user *, new_setting,
+		struct itimerspec __user *, old_setting)
 {
 	struct k_itimer *timr;
 	struct itimerspec new_spec, old_spec;
@@ -816,8 +813,7 @@ static inline int timer_delete_hook(struct k_itimer *timer)
 }
 
 /* Delete a POSIX.1b interval timer. */
-asmlinkage long
-sys_timer_delete(timer_t timer_id)
+SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
 	struct k_itimer *timer;
 	unsigned long flags;
@@ -903,8 +899,8 @@ int do_posix_clock_nonanosleep(const clockid_t clock, int flags,
 }
 EXPORT_SYMBOL_GPL(do_posix_clock_nonanosleep);
 
-asmlinkage long sys_clock_settime(const clockid_t which_clock,
-				  const struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
+		const struct timespec __user *, tp)
 {
 	struct timespec new_tp;
 
@@ -916,8 +912,8 @@ asmlinkage long sys_clock_settime(const clockid_t which_clock,
 	return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
 }
 
-asmlinkage long
-sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
+		struct timespec __user *,tp)
 {
 	struct timespec kernel_tp;
 	int error;
@@ -933,8 +929,8 @@ sys_clock_gettime(const clockid_t which_clock, struct timespec __user *tp)
 
 }
 
-asmlinkage long
-sys_clock_getres(const clockid_t which_clock, struct timespec __user *tp)
+SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
+		struct timespec __user *, tp)
 {
 	struct timespec rtn_tp;
 	int error;
@@ -963,10 +959,9 @@ static int common_nsleep(const clockid_t which_clock, int flags,
 				 which_clock);
 }
 
-asmlinkage long
-sys_clock_nanosleep(const clockid_t which_clock, int flags,
-		    const struct timespec __user *rqtp,
-		    struct timespec __user *rmtp)
+SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
+		const struct timespec __user *, rqtp,
+		struct timespec __user *, rmtp)
 {
 	struct timespec t;
 
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 597823b5b700..d7a10167a25b 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -4,7 +4,8 @@ EXTRA_CFLAGS += -DDEBUG
 endif
 
 obj-y				:= main.o
-obj-$(CONFIG_PM_SLEEP)		+= process.o console.o
+obj-$(CONFIG_PM_SLEEP)		+= console.o
+obj-$(CONFIG_FREEZER)		+= process.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o disk.o snapshot.o swap.o user.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 45e8541ab7e3..432ee575c9ee 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -71,6 +71,14 @@ void hibernation_set_ops(struct platform_hibernation_ops *ops)
 	mutex_unlock(&pm_mutex);
 }
 
+static bool entering_platform_hibernation;
+
+bool system_entering_hibernation(void)
+{
+	return entering_platform_hibernation;
+}
+EXPORT_SYMBOL(system_entering_hibernation);
+
 #ifdef CONFIG_PM_DEBUG
 static void hibernation_debug_sleep(void)
 {
@@ -411,6 +419,7 @@ int hibernation_platform_enter(void)
 	if (error)
 		goto Close;
 
+	entering_platform_hibernation = true;
 	suspend_console();
 	error = device_suspend(PMSG_HIBERNATE);
 	if (error) {
@@ -445,6 +454,7 @@ int hibernation_platform_enter(void)
  Finish:
 	hibernation_ops->finish();
  Resume_devices:
+	entering_platform_hibernation = false;
 	device_resume(PMSG_RESTORE);
 	resume_console();
  Close:
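
system_entering_hibernation() gives drivers a way to tell the platform-hibernation path apart from suspend-to-RAM while devices are being suspended. A hedged sketch of a consumer (the driver and helper names are hypothetical, not taken from this diff):

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* arming wake-on-LAN style logic only makes sense for suspend,
	 * not for a platform hibernate where power is cut anyway */
	if (system_entering_hibernation())
		foo_disable_wakeup(pdev);	/* hypothetical helper */
	return 0;
}
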
diff --git a/kernel/printk.c b/kernel/printk.c
index 7015733793e8..69188f226a93 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -382,7 +382,7 @@ out:
 	return error;
 }
 
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
+SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
 {
 	return do_syslog(type, buf, len);
 }
@@ -742,11 +742,6 @@ EXPORT_SYMBOL(vprintk);
 
 #else
 
-asmlinkage long sys_syslog(int type, char __user *buf, int len)
-{
-	return -ENOSYS;
-}
-
 static void call_console_drivers(unsigned start, unsigned end)
 {
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 29dc700e198c..c9cf48b21f05 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -574,7 +574,7 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
 #define arch_ptrace_attach(child)	do { } while (0)
 #endif
 
-asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
+SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
 {
 	struct task_struct *child;
 	long ret;
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 490934fc7ac3..bd5a9003497c 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -716,7 +716,7 @@ void rcu_check_callbacks(int cpu, int user)
 		raise_rcu_softirq();
 }
 
-static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
 						struct rcu_data *rdp)
 {
 	unsigned long flags;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index f2d8638e6c60..b2fd602a6f6f 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1314,7 +1314,7 @@ int rcu_needs_cpu(int cpu)
  * access due to the fact that this CPU cannot possibly have any RCU
  * callbacks in flight yet.
  */
-static void
+static void __cpuinit
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
diff --git a/kernel/relay.c b/kernel/relay.c
index 09ac2008f77b..9d79b7854fa6 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -663,8 +663,10 @@ int relay_late_setup_files(struct rchan *chan,
 
 	mutex_lock(&relay_channels_mutex);
 	/* Is chan already set up? */
-	if (unlikely(chan->has_base_filename))
+	if (unlikely(chan->has_base_filename)) {
+		mutex_unlock(&relay_channels_mutex);
 		return -EEXIST;
+	}
 	chan->has_base_filename = 1;
 	chan->parent = parent;
 	curr_cpu = get_cpu();
diff --git a/kernel/resource.c b/kernel/resource.c
index ca6a1536b205..fd5d7d574bb9 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -620,6 +620,7 @@ resource_size_t resource_alignment(struct resource *res)
  * @start: resource start address
  * @n: resource region size
  * @name: reserving caller's ID string
+ * @flags: IO resource flags
  */
 struct resource * __request_region(struct resource *parent,
 				   resource_size_t start, resource_size_t n,
diff --git a/kernel/sched.c b/kernel/sched.c
index 8be2c13b50d0..52bbf1c842a8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1323,8 +1323,8 @@ static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  * slice expiry etc.
  */
 
-#define WEIGHT_IDLEPRIO		2
-#define WMULT_IDLEPRIO		(1 << 31)
+#define WEIGHT_IDLEPRIO		3
+#define WMULT_IDLEPRIO		1431655765
 
 /*
  * Nice levels are multiplicative, with a gentle 10% change for every
@@ -4440,7 +4440,7 @@ void __kprobes sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	if (DEBUG_LOCKS_WARN_ON(val > preempt_count() - (!!kernel_locked())))
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
 		return;
 	/*
 	 * Is the spinlock portion underflowing?
@@ -5126,7 +5126,7 @@ int can_nice(const struct task_struct *p, const int nice)
  * sys_setpriority is a more generic, but much slower function that
  * does similar things.
  */
-asmlinkage long sys_nice(int increment)
+SYSCALL_DEFINE1(nice, int, increment)
 {
 	long nice, retval;
 
@@ -5433,8 +5433,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @policy: new policy.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long
-sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
+		struct sched_param __user *, param)
 {
 	/* negative values for policy are not valid */
 	if (policy < 0)
@@ -5448,7 +5448,7 @@ sys_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  * @pid: the pid in question.
  * @param: structure containing the new RT priority.
  */
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
 	return do_sched_setscheduler(pid, -1, param);
 }
@@ -5457,7 +5457,7 @@ asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  * @pid: the pid in question.
  */
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
 {
 	struct task_struct *p;
 	int retval;
@@ -5482,7 +5482,7 @@ asmlinkage long sys_sched_getscheduler(pid_t pid)
  * @pid: the pid in question.
  * @param: structure containing the RT priority.
  */
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
 {
 	struct sched_param lp;
 	struct task_struct *p;
@@ -5600,8 +5600,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to the new cpu mask
  */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	cpumask_var_t new_mask;
 	int retval;
@@ -5648,8 +5648,8 @@ out_unlock:
  * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  * @user_mask_ptr: user-space pointer to hold the current cpu mask
  */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
-				      unsigned long __user *user_mask_ptr)
+SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
+		unsigned long __user *, user_mask_ptr)
 {
 	int ret;
 	cpumask_var_t mask;
@@ -5678,7 +5678,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  * This function yields the current CPU to other tasks. If there are no
  * other threads running on this CPU then this function will return.
  */
-asmlinkage long sys_sched_yield(void)
+SYSCALL_DEFINE0(sched_yield)
 {
 	struct rq *rq = this_rq_lock();
 
@@ -5819,7 +5819,7 @@ long __sched io_schedule_timeout(long timeout)
  * this syscall returns the maximum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_max(int policy)
+SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5844,7 +5844,7 @@ asmlinkage long sys_sched_get_priority_max(int policy)
  * this syscall returns the minimum rt_priority that can be used
  * by a given scheduling class.
  */
-asmlinkage long sys_sched_get_priority_min(int policy)
+SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
 {
 	int ret = -EINVAL;
 
@@ -5869,8 +5869,8 @@ asmlinkage long sys_sched_get_priority_min(int policy)
  * this syscall writes the default timeslice value of a given process
  * into the user-space timespec buffer. A value of '0' means infinity.
  */
-asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
+		struct timespec __user *, interval)
 {
 	struct task_struct *p;
 	unsigned int time_slice;
@@ -9050,6 +9050,13 @@ static int tg_schedulable(struct task_group *tg, void *data)
 		runtime = d->rt_runtime;
 	}
 
+#ifdef CONFIG_USER_SCHED
+	if (tg == &root_task_group) {
+		period = global_rt_period();
+		runtime = global_rt_runtime();
+	}
+#endif
+
 	/*
 	 * Cannot have more runtime than the period.
 	 */
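
On the WEIGHT_IDLEPRIO/WMULT_IDLEPRIO change above: CFS stores, next to each load weight, the precomputed inverse 2^32 / weight, and the new constant is just that identity for weight 3 (the old pair satisfied it for weight 2, since 2^32 / 2 = 1 << 31). A quick self-contained check:

#include <stdio.h>

int main(void)
{
	printf("%llu\n", (1ULL << 32) / 3);	/* 1431655765 */
	printf("%llu\n", (1ULL << 32) / 2);	/* 2147483648 == 1ULL << 31 */
	return 0;
}
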
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 8e1352c75557..5cc1c162044f 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -283,7 +283,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq)
 					   struct sched_entity,
 					   run_node);
 
-		if (vruntime == cfs_rq->min_vruntime)
+		if (!cfs_rq->curr)
 			vruntime = se->vruntime;
 		else
 			vruntime = min_vruntime(vruntime, se->vruntime);
@@ -429,7 +429,10 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load = &cfs_rq->load;
+		struct load_weight *load;
+
+		cfs_rq = cfs_rq_of(se);
+		load = &cfs_rq->load;
 
 		if (unlikely(!se->on_rq)) {
 			struct load_weight lw = cfs_rq->load;
@@ -677,9 +680,13 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		unsigned long thresh = sysctl_sched_latency;
 
 		/*
-		 * convert the sleeper threshold into virtual time
+		 * Convert the sleeper threshold into virtual time.
+		 * SCHED_IDLE is a special sub-class. We care about
+		 * fairness only relative to other SCHED_IDLE tasks,
+		 * all of which have the same weight.
 		 */
-		if (sched_feat(NORMALIZED_SLEEPER))
+		if (sched_feat(NORMALIZED_SLEEPER) &&
+				task_of(se)->policy != SCHED_IDLE)
 			thresh = calc_delta_fair(thresh, se);
 
 		vruntime -= thresh;
@@ -1340,14 +1347,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
 
 static void set_last_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->last = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->last = se;
+	}
 }
 
 static void set_next_buddy(struct sched_entity *se)
 {
-	for_each_sched_entity(se)
-		cfs_rq_of(se)->next = se;
+	if (likely(task_of(se)->policy != SCHED_IDLE)) {
+		for_each_sched_entity(se)
+			cfs_rq_of(se)->next = se;
+	}
 }
 
 /*
@@ -1393,12 +1404,18 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
 		return;
 
 	/*
-	 * Batch tasks do not preempt (their preemption is driven by
+	 * Batch and idle tasks do not preempt (their preemption is driven by
 	 * the tick):
 	 */
-	if (unlikely(p->policy == SCHED_BATCH))
+	if (unlikely(p->policy != SCHED_NORMAL))
 		return;
 
+	/* Idle tasks are by definition preempted by everybody. */
+	if (unlikely(curr->policy == SCHED_IDLE)) {
+		resched_task(curr);
+		return;
+	}
+
 	if (!sched_feat(WAKEUP_PREEMPT))
 		return;
 
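
These sched_fair.c changes make SCHED_IDLE tasks never set buddies and always yield to wakeups of normal tasks. For reference, a task opts into that class from userspace via sched_setscheduler() with a zero priority; a minimal sketch (assumes glibc exposes SCHED_IDLE under _GNU_SOURCE):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };

	if (sched_setscheduler(0, SCHED_IDLE, &sp))	/* 0 == this process */
		perror("sched_setscheduler");
	return 0;
}
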
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index f2773b5d1226..8ab0cef8ecab 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -296,6 +296,7 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
296static inline void account_group_user_time(struct task_struct *tsk, 296static inline void account_group_user_time(struct task_struct *tsk,
297 cputime_t cputime) 297 cputime_t cputime)
298{ 298{
299 struct task_cputime *times;
299 struct signal_struct *sig; 300 struct signal_struct *sig;
300 301
301 /* tsk == current, ensure it is safe to use ->signal */ 302 /* tsk == current, ensure it is safe to use ->signal */
@@ -303,13 +304,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
303 return; 304 return;
304 305
305 sig = tsk->signal; 306 sig = tsk->signal;
306 if (sig->cputime.totals) { 307 times = &sig->cputime.totals;
307 struct task_cputime *times;
308 308
309 times = per_cpu_ptr(sig->cputime.totals, get_cpu()); 309 spin_lock(&times->lock);
310 times->utime = cputime_add(times->utime, cputime); 310 times->utime = cputime_add(times->utime, cputime);
311 put_cpu_no_resched(); 311 spin_unlock(&times->lock);
312 }
313} 312}
314 313
315/** 314/**
@@ -325,6 +324,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
325static inline void account_group_system_time(struct task_struct *tsk, 324static inline void account_group_system_time(struct task_struct *tsk,
326 cputime_t cputime) 325 cputime_t cputime)
327{ 326{
327 struct task_cputime *times;
328 struct signal_struct *sig; 328 struct signal_struct *sig;
329 329
330 /* tsk == current, ensure it is safe to use ->signal */ 330 /* tsk == current, ensure it is safe to use ->signal */
@@ -332,13 +332,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
332 return; 332 return;
333 333
334 sig = tsk->signal; 334 sig = tsk->signal;
335 if (sig->cputime.totals) { 335 times = &sig->cputime.totals;
336 struct task_cputime *times;
337 336
338 times = per_cpu_ptr(sig->cputime.totals, get_cpu()); 337 spin_lock(&times->lock);
339 times->stime = cputime_add(times->stime, cputime); 338 times->stime = cputime_add(times->stime, cputime);
340 put_cpu_no_resched(); 339 spin_unlock(&times->lock);
341 }
342} 340}
343 341
344/** 342/**
@@ -354,6 +352,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
354static inline void account_group_exec_runtime(struct task_struct *tsk, 352static inline void account_group_exec_runtime(struct task_struct *tsk,
355 unsigned long long ns) 353 unsigned long long ns)
356{ 354{
355 struct task_cputime *times;
357 struct signal_struct *sig; 356 struct signal_struct *sig;
358 357
359 sig = tsk->signal; 358 sig = tsk->signal;
@@ -362,11 +361,9 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
362 if (unlikely(!sig)) 361 if (unlikely(!sig))
363 return; 362 return;
364 363
365 if (sig->cputime.totals) { 364 times = &sig->cputime.totals;
366 struct task_cputime *times;
367 365
368 times = per_cpu_ptr(sig->cputime.totals, get_cpu()); 366 spin_lock(&times->lock);
369 times->sum_exec_runtime += ns; 367 times->sum_exec_runtime += ns;
370 put_cpu_no_resched(); 368 spin_unlock(&times->lock);
371 }
372} 369}
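
The sched_stats.h hunks replace the per-CPU totals pointer (reached via per_cpu_ptr()/get_cpu()) with a single task_cputime struct guarded by its own lock. A minimal userspace analogue of the new scheme, assuming a pthread spinlock where the kernel uses its own spinlock_t; field names mirror the hunk, the rest is demo scaffolding.

#include <pthread.h>
#include <stdio.h>

/* One shared totals struct per thread group, guarded by its lock. */
struct task_cputime {
        unsigned long long utime, stime, sum_exec_runtime;
        pthread_spinlock_t lock;
};

static void account_group_user_time(struct task_cputime *times,
                                    unsigned long long cputime)
{
        pthread_spin_lock(&times->lock);
        times->utime += cputime;        /* was: lockless per-CPU add */
        pthread_spin_unlock(&times->lock);
}

int main(void)
{
        struct task_cputime t = { 0 };

        pthread_spin_init(&t.lock, PTHREAD_PROCESS_PRIVATE);
        account_group_user_time(&t, 5);
        printf("utime=%llu\n", t.utime);
        return 0;
}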
diff --git a/kernel/signal.c b/kernel/signal.c
index 3152ac3b62e2..e73759783dc8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1961,7 +1961,7 @@ EXPORT_SYMBOL(unblock_all_signals);
1961 * System call entry points. 1961 * System call entry points.
1962 */ 1962 */
1963 1963
1964asmlinkage long sys_restart_syscall(void) 1964SYSCALL_DEFINE0(restart_syscall)
1965{ 1965{
1966 struct restart_block *restart = &current_thread_info()->restart_block; 1966 struct restart_block *restart = &current_thread_info()->restart_block;
1967 return restart->fn(restart); 1967 return restart->fn(restart);
@@ -2014,8 +2014,8 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2014 return error; 2014 return error;
2015} 2015}
2016 2016
2017asmlinkage long 2017SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2018sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) 2018 sigset_t __user *, oset, size_t, sigsetsize)
2019{ 2019{
2020 int error = -EINVAL; 2020 int error = -EINVAL;
2021 sigset_t old_set, new_set; 2021 sigset_t old_set, new_set;
@@ -2074,8 +2074,7 @@ out:
2074 return error; 2074 return error;
2075} 2075}
2076 2076
2077asmlinkage long 2077SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2078sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2079{ 2078{
2080 return do_sigpending(set, sigsetsize); 2079 return do_sigpending(set, sigsetsize);
2081} 2080}
@@ -2146,11 +2145,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2146 2145
2147#endif 2146#endif
2148 2147
2149asmlinkage long 2148SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2150sys_rt_sigtimedwait(const sigset_t __user *uthese, 2149 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2151 siginfo_t __user *uinfo, 2150 size_t, sigsetsize)
2152 const struct timespec __user *uts,
2153 size_t sigsetsize)
2154{ 2151{
2155 int ret, sig; 2152 int ret, sig;
2156 sigset_t these; 2153 sigset_t these;
@@ -2223,8 +2220,7 @@ sys_rt_sigtimedwait(const sigset_t __user *uthese,
2223 return ret; 2220 return ret;
2224} 2221}
2225 2222
2226asmlinkage long 2223SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2227sys_kill(pid_t pid, int sig)
2228{ 2224{
2229 struct siginfo info; 2225 struct siginfo info;
2230 2226
@@ -2283,7 +2279,7 @@ static int do_tkill(pid_t tgid, pid_t pid, int sig)
2283 * exists but no longer belongs to the target process. This 2279 * exists but no longer belongs to the target process. This
2284 * method solves the problem of threads exiting and PIDs getting reused. 2280 * method solves the problem of threads exiting and PIDs getting reused.
2285 */ 2281 */
2286asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig) 2282SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2287{ 2283{
2288 /* This is only valid for single tasks */ 2284 /* This is only valid for single tasks */
2289 if (pid <= 0 || tgid <= 0) 2285 if (pid <= 0 || tgid <= 0)
@@ -2295,8 +2291,7 @@ asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2295/* 2291/*
2296 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2292 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2297 */ 2293 */
2298asmlinkage long 2294SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2299sys_tkill(pid_t pid, int sig)
2300{ 2295{
2301 /* This is only valid for single tasks */ 2296 /* This is only valid for single tasks */
2302 if (pid <= 0) 2297 if (pid <= 0)
@@ -2305,8 +2300,8 @@ sys_tkill(pid_t pid, int sig)
2305 return do_tkill(0, pid, sig); 2300 return do_tkill(0, pid, sig);
2306} 2301}
2307 2302
2308asmlinkage long 2303SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2309sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo) 2304 siginfo_t __user *, uinfo)
2310{ 2305{
2311 siginfo_t info; 2306 siginfo_t info;
2312 2307
@@ -2434,8 +2429,7 @@ out:
2434 2429
2435#ifdef __ARCH_WANT_SYS_SIGPENDING 2430#ifdef __ARCH_WANT_SYS_SIGPENDING
2436 2431
2437asmlinkage long 2432SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2438sys_sigpending(old_sigset_t __user *set)
2439{ 2433{
2440 return do_sigpending(set, sizeof(*set)); 2434 return do_sigpending(set, sizeof(*set));
2441} 2435}
@@ -2446,8 +2440,8 @@ sys_sigpending(old_sigset_t __user *set)
2446/* Some platforms have their own version with special arguments; others 2440/* Some platforms have their own version with special arguments; others
2447 support only sys_rt_sigprocmask. */ 2441 support only sys_rt_sigprocmask. */
2448 2442
2449asmlinkage long 2443SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2450sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) 2444 old_sigset_t __user *, oset)
2451{ 2445{
2452 int error; 2446 int error;
2453 old_sigset_t old_set, new_set; 2447 old_sigset_t old_set, new_set;
@@ -2497,11 +2491,10 @@ out:
2497#endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 2491#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2498 2492
2499#ifdef __ARCH_WANT_SYS_RT_SIGACTION 2493#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2500asmlinkage long 2494SYSCALL_DEFINE4(rt_sigaction, int, sig,
2501sys_rt_sigaction(int sig, 2495 const struct sigaction __user *, act,
2502 const struct sigaction __user *act, 2496 struct sigaction __user *, oact,
2503 struct sigaction __user *oact, 2497 size_t, sigsetsize)
2504 size_t sigsetsize)
2505{ 2498{
2506 struct k_sigaction new_sa, old_sa; 2499 struct k_sigaction new_sa, old_sa;
2507 int ret = -EINVAL; 2500 int ret = -EINVAL;
@@ -2531,15 +2524,13 @@ out:
2531/* 2524/*
2532 * For backwards compatibility. Functionality superseded by sigprocmask. 2525 * For backwards compatibility. Functionality superseded by sigprocmask.
2533 */ 2526 */
2534asmlinkage long 2527SYSCALL_DEFINE0(sgetmask)
2535sys_sgetmask(void)
2536{ 2528{
2537 /* SMP safe */ 2529 /* SMP safe */
2538 return current->blocked.sig[0]; 2530 return current->blocked.sig[0];
2539} 2531}
2540 2532
2541asmlinkage long 2533SYSCALL_DEFINE1(ssetmask, int, newmask)
2542sys_ssetmask(int newmask)
2543{ 2534{
2544 int old; 2535 int old;
2545 2536
@@ -2559,8 +2550,7 @@ sys_ssetmask(int newmask)
2559/* 2550/*
2560 * For backwards compatibility. Functionality superseded by sigaction. 2551 * For backwards compatibility. Functionality superseded by sigaction.
2561 */ 2552 */
2562asmlinkage unsigned long 2553SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2563sys_signal(int sig, __sighandler_t handler)
2564{ 2554{
2565 struct k_sigaction new_sa, old_sa; 2555 struct k_sigaction new_sa, old_sa;
2566 int ret; 2556 int ret;
@@ -2577,8 +2567,7 @@ sys_signal(int sig, __sighandler_t handler)
2577 2567
2578#ifdef __ARCH_WANT_SYS_PAUSE 2568#ifdef __ARCH_WANT_SYS_PAUSE
2579 2569
2580asmlinkage long 2570SYSCALL_DEFINE0(pause)
2581sys_pause(void)
2582{ 2571{
2583 current->state = TASK_INTERRUPTIBLE; 2572 current->state = TASK_INTERRUPTIBLE;
2584 schedule(); 2573 schedule();
@@ -2588,7 +2577,7 @@ sys_pause(void)
2588#endif 2577#endif
2589 2578
2590#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND 2579#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2591asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize) 2580SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2592{ 2581{
2593 sigset_t newset; 2582 sigset_t newset;
2594 2583
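
Every asmlinkage long sys_foo(...) definition above becomes SYSCALL_DEFINEn(foo, type1, arg1, ...). In its simplest configuration the macro just reconstitutes the old signature; the stripped-down sketch below shows the idea, with the caveat that the real header can additionally emit sign-extension wrappers on some 64-bit architectures.

#include <stdio.h>
#include <sys/types.h>

#define asmlinkage      /* calling-convention attribute, empty for the demo */

/* Stripped-down sketch of SYSCALL_DEFINE2: paste the sys_ prefix back
 * on and interleave the type/name argument pairs. */
#define SYSCALL_DEFINE2(name, t1, a1, t2, a2) \
        asmlinkage long sys_##name(t1 a1, t2 a2)

SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
        printf("sys_kill(%d, %d)\n", (int)pid, sig);
        return 0;
}

int main(void)
{
        return (int)sys_kill(1234, 9);
}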
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d9188c66278a..85d5a2455103 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -16,6 +16,7 @@
16#include <linux/lockdep.h> 16#include <linux/lockdep.h>
17#include <linux/notifier.h> 17#include <linux/notifier.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/sysctl.h>
19 20
20#include <asm/irq_regs.h> 21#include <asm/irq_regs.h>
21 22
@@ -88,6 +89,14 @@ void touch_all_softlockup_watchdogs(void)
88} 89}
89EXPORT_SYMBOL(touch_all_softlockup_watchdogs); 90EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
90 91
92int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
93 struct file *filp, void __user *buffer,
94 size_t *lenp, loff_t *ppos)
95{
96 touch_all_softlockup_watchdogs();
97 return proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
98}
99
91/* 100/*
92 * This callback runs from the timer interrupt, and checks 101 * This callback runs from the timer interrupt, and checks
93 * whether the watchdog thread has hung or not: 102 * whether the watchdog thread has hung or not:
diff --git a/kernel/sys.c b/kernel/sys.c
index 763c3c17ded3..e7dc0e10a485 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -143,7 +143,7 @@ out:
143 return error; 143 return error;
144} 144}
145 145
146asmlinkage long sys_setpriority(int which, int who, int niceval) 146SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
147{ 147{
148 struct task_struct *g, *p; 148 struct task_struct *g, *p;
149 struct user_struct *user; 149 struct user_struct *user;
@@ -208,7 +208,7 @@ out:
208 * has been offset by 20 (ie it returns 40..1 instead of -20..19) 208 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
209 * to stay compatible. 209 * to stay compatible.
210 */ 210 */
211asmlinkage long sys_getpriority(int which, int who) 211SYSCALL_DEFINE2(getpriority, int, which, int, who)
212{ 212{
213 struct task_struct *g, *p; 213 struct task_struct *g, *p;
214 struct user_struct *user; 214 struct user_struct *user;
@@ -355,7 +355,8 @@ EXPORT_SYMBOL_GPL(kernel_power_off);
355 * 355 *
356 * reboot doesn't sync: do that yourself before calling this. 356 * reboot doesn't sync: do that yourself before calling this.
357 */ 357 */
358asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg) 358SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
359 void __user *, arg)
359{ 360{
360 char buffer[256]; 361 char buffer[256];
361 362
@@ -478,7 +479,7 @@ void ctrl_alt_del(void)
478 * SMP: There are not races, the GIDs are checked only by filesystem 479 * SMP: There are not races, the GIDs are checked only by filesystem
479 * operations (as far as semantic preservation is concerned). 480 * operations (as far as semantic preservation is concerned).
480 */ 481 */
481asmlinkage long sys_setregid(gid_t rgid, gid_t egid) 482SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
482{ 483{
483 const struct cred *old; 484 const struct cred *old;
484 struct cred *new; 485 struct cred *new;
@@ -529,7 +530,7 @@ error:
529 * 530 *
530 * SMP: Same implicit races as above. 531 * SMP: Same implicit races as above.
531 */ 532 */
532asmlinkage long sys_setgid(gid_t gid) 533SYSCALL_DEFINE1(setgid, gid_t, gid)
533{ 534{
534 const struct cred *old; 535 const struct cred *old;
535 struct cred *new; 536 struct cred *new;
@@ -597,7 +598,7 @@ static int set_user(struct cred *new)
597 * 100% compatible with BSD. A program which uses just setuid() will be 598 * 100% compatible with BSD. A program which uses just setuid() will be
598 * 100% compatible with POSIX with saved IDs. 599 * 100% compatible with POSIX with saved IDs.
599 */ 600 */
600asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) 601SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
601{ 602{
602 const struct cred *old; 603 const struct cred *old;
603 struct cred *new; 604 struct cred *new;
@@ -661,7 +662,7 @@ error:
661 * will allow a root program to temporarily drop privileges and be able to 662 * will allow a root program to temporarily drop privileges and be able to
662 * regain them by swapping the real and effective uid. 663 * regain them by swapping the real and effective uid.
663 */ 664 */
664asmlinkage long sys_setuid(uid_t uid) 665SYSCALL_DEFINE1(setuid, uid_t, uid)
665{ 666{
666 const struct cred *old; 667 const struct cred *old;
667 struct cred *new; 668 struct cred *new;
@@ -705,7 +706,7 @@ error:
705 * This function implements a generic ability to update ruid, euid, 706 * This function implements a generic ability to update ruid, euid,
706 * and suid. This allows you to implement the 4.4 compatible seteuid(). 707 * and suid. This allows you to implement the 4.4 compatible seteuid().
707 */ 708 */
708asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) 709SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
709{ 710{
710 const struct cred *old; 711 const struct cred *old;
711 struct cred *new; 712 struct cred *new;
@@ -756,7 +757,7 @@ error:
756 return retval; 757 return retval;
757} 758}
758 759
759asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) 760SYSCALL_DEFINE3(getresuid, uid_t __user *, ruid, uid_t __user *, euid, uid_t __user *, suid)
760{ 761{
761 const struct cred *cred = current_cred(); 762 const struct cred *cred = current_cred();
762 int retval; 763 int retval;
@@ -771,7 +772,7 @@ asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __us
771/* 772/*
772 * Same as above, but for rgid, egid, sgid. 773 * Same as above, but for rgid, egid, sgid.
773 */ 774 */
774asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) 775SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
775{ 776{
776 const struct cred *old; 777 const struct cred *old;
777 struct cred *new; 778 struct cred *new;
@@ -814,7 +815,7 @@ error:
814 return retval; 815 return retval;
815} 816}
816 817
817asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) 818SYSCALL_DEFINE3(getresgid, gid_t __user *, rgid, gid_t __user *, egid, gid_t __user *, sgid)
818{ 819{
819 const struct cred *cred = current_cred(); 820 const struct cred *cred = current_cred();
820 int retval; 821 int retval;
@@ -833,7 +834,7 @@ asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __us
833 * whatever uid it wants to). It normally shadows "euid", except when 834 * whatever uid it wants to). It normally shadows "euid", except when
834 * explicitly set by setfsuid() or for access.. 835 * explicitly set by setfsuid() or for access..
835 */ 836 */
836asmlinkage long sys_setfsuid(uid_t uid) 837SYSCALL_DEFINE1(setfsuid, uid_t, uid)
837{ 838{
838 const struct cred *old; 839 const struct cred *old;
839 struct cred *new; 840 struct cred *new;
@@ -870,7 +871,7 @@ change_okay:
870/* 871/*
871 * Same in Swedish.. 872 * Same in Swedish..
872 */ 873 */
873asmlinkage long sys_setfsgid(gid_t gid) 874SYSCALL_DEFINE1(setfsgid, gid_t, gid)
874{ 875{
875 const struct cred *old; 876 const struct cred *old;
876 struct cred *new; 877 struct cred *new;
@@ -919,7 +920,7 @@ void do_sys_times(struct tms *tms)
919 tms->tms_cstime = cputime_to_clock_t(cstime); 920 tms->tms_cstime = cputime_to_clock_t(cstime);
920} 921}
921 922
922asmlinkage long sys_times(struct tms __user * tbuf) 923SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
923{ 924{
924 if (tbuf) { 925 if (tbuf) {
925 struct tms tmp; 926 struct tms tmp;
@@ -944,7 +945,7 @@ asmlinkage long sys_times(struct tms __user * tbuf)
944 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX. 945 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
945 * LBT 04.03.94 946 * LBT 04.03.94
946 */ 947 */
947asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) 948SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
948{ 949{
949 struct task_struct *p; 950 struct task_struct *p;
950 struct task_struct *group_leader = current->group_leader; 951 struct task_struct *group_leader = current->group_leader;
@@ -1015,7 +1016,7 @@ out:
1015 return err; 1016 return err;
1016} 1017}
1017 1018
1018asmlinkage long sys_getpgid(pid_t pid) 1019SYSCALL_DEFINE1(getpgid, pid_t, pid)
1019{ 1020{
1020 struct task_struct *p; 1021 struct task_struct *p;
1021 struct pid *grp; 1022 struct pid *grp;
@@ -1045,14 +1046,14 @@ out:
1045 1046
1046#ifdef __ARCH_WANT_SYS_GETPGRP 1047#ifdef __ARCH_WANT_SYS_GETPGRP
1047 1048
1048asmlinkage long sys_getpgrp(void) 1049SYSCALL_DEFINE0(getpgrp)
1049{ 1050{
1050 return sys_getpgid(0); 1051 return sys_getpgid(0);
1051} 1052}
1052 1053
1053#endif 1054#endif
1054 1055
1055asmlinkage long sys_getsid(pid_t pid) 1056SYSCALL_DEFINE1(getsid, pid_t, pid)
1056{ 1057{
1057 struct task_struct *p; 1058 struct task_struct *p;
1058 struct pid *sid; 1059 struct pid *sid;
@@ -1080,7 +1081,7 @@ out:
1080 return retval; 1081 return retval;
1081} 1082}
1082 1083
1083asmlinkage long sys_setsid(void) 1084SYSCALL_DEFINE0(setsid)
1084{ 1085{
1085 struct task_struct *group_leader = current->group_leader; 1086 struct task_struct *group_leader = current->group_leader;
1086 struct pid *sid = task_pid(group_leader); 1087 struct pid *sid = task_pid(group_leader);
@@ -1311,7 +1312,7 @@ int set_current_groups(struct group_info *group_info)
1311 1312
1312EXPORT_SYMBOL(set_current_groups); 1313EXPORT_SYMBOL(set_current_groups);
1313 1314
1314asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) 1315SYSCALL_DEFINE2(getgroups, int, gidsetsize, gid_t __user *, grouplist)
1315{ 1316{
1316 const struct cred *cred = current_cred(); 1317 const struct cred *cred = current_cred();
1317 int i; 1318 int i;
@@ -1340,7 +1341,7 @@ out:
1340 * without another task interfering. 1341 * without another task interfering.
1341 */ 1342 */
1342 1343
1343asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) 1344SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
1344{ 1345{
1345 struct group_info *group_info; 1346 struct group_info *group_info;
1346 int retval; 1347 int retval;
@@ -1394,7 +1395,7 @@ EXPORT_SYMBOL(in_egroup_p);
1394 1395
1395DECLARE_RWSEM(uts_sem); 1396DECLARE_RWSEM(uts_sem);
1396 1397
1397asmlinkage long sys_newuname(struct new_utsname __user * name) 1398SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1398{ 1399{
1399 int errno = 0; 1400 int errno = 0;
1400 1401
@@ -1405,7 +1406,7 @@ asmlinkage long sys_newuname(struct new_utsname __user * name)
1405 return errno; 1406 return errno;
1406} 1407}
1407 1408
1408asmlinkage long sys_sethostname(char __user *name, int len) 1409SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1409{ 1410{
1410 int errno; 1411 int errno;
1411 char tmp[__NEW_UTS_LEN]; 1412 char tmp[__NEW_UTS_LEN];
@@ -1429,7 +1430,7 @@ asmlinkage long sys_sethostname(char __user *name, int len)
1429 1430
1430#ifdef __ARCH_WANT_SYS_GETHOSTNAME 1431#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1431 1432
1432asmlinkage long sys_gethostname(char __user *name, int len) 1433SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1433{ 1434{
1434 int i, errno; 1435 int i, errno;
1435 struct new_utsname *u; 1436 struct new_utsname *u;
@@ -1454,7 +1455,7 @@ asmlinkage long sys_gethostname(char __user *name, int len)
1454 * Only setdomainname; getdomainname can be implemented by calling 1455 * Only setdomainname; getdomainname can be implemented by calling
1455 * uname() 1456 * uname()
1456 */ 1457 */
1457asmlinkage long sys_setdomainname(char __user *name, int len) 1458SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1458{ 1459{
1459 int errno; 1460 int errno;
1460 char tmp[__NEW_UTS_LEN]; 1461 char tmp[__NEW_UTS_LEN];
@@ -1477,7 +1478,7 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
1477 return errno; 1478 return errno;
1478} 1479}
1479 1480
1480asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim) 1481SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1481{ 1482{
1482 if (resource >= RLIM_NLIMITS) 1483 if (resource >= RLIM_NLIMITS)
1483 return -EINVAL; 1484 return -EINVAL;
@@ -1496,7 +1497,8 @@ asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
1496 * Back compatibility for getrlimit. Needed for some apps. 1497 * Back compatibility for getrlimit. Needed for some apps.
1497 */ 1498 */
1498 1499
1499asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim) 1500SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1501 struct rlimit __user *, rlim)
1500{ 1502{
1501 struct rlimit x; 1503 struct rlimit x;
1502 if (resource >= RLIM_NLIMITS) 1504 if (resource >= RLIM_NLIMITS)
@@ -1514,7 +1516,7 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
1514 1516
1515#endif 1517#endif
1516 1518
1517asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) 1519SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1518{ 1520{
1519 struct rlimit new_rlim, *old_rlim; 1521 struct rlimit new_rlim, *old_rlim;
1520 int retval; 1522 int retval;
@@ -1687,7 +1689,7 @@ int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1687 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; 1689 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1688} 1690}
1689 1691
1690asmlinkage long sys_getrusage(int who, struct rusage __user *ru) 1692SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1691{ 1693{
1692 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && 1694 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1693 who != RUSAGE_THREAD) 1695 who != RUSAGE_THREAD)
@@ -1695,14 +1697,14 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
1695 return getrusage(current, who, ru); 1697 return getrusage(current, who, ru);
1696} 1698}
1697 1699
1698asmlinkage long sys_umask(int mask) 1700SYSCALL_DEFINE1(umask, int, mask)
1699{ 1701{
1700 mask = xchg(&current->fs->umask, mask & S_IRWXUGO); 1702 mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1701 return mask; 1703 return mask;
1702} 1704}
1703 1705
1704asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, 1706SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
1705 unsigned long arg4, unsigned long arg5) 1707 unsigned long, arg4, unsigned long, arg5)
1706{ 1708{
1707 struct task_struct *me = current; 1709 struct task_struct *me = current;
1708 unsigned char comm[sizeof(me->comm)]; 1710 unsigned char comm[sizeof(me->comm)];
@@ -1815,8 +1817,8 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
1815 return error; 1817 return error;
1816} 1818}
1817 1819
1818asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep, 1820SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
1819 struct getcpu_cache __user *unused) 1821 struct getcpu_cache __user *, unused)
1820{ 1822{
1821 int err = 0; 1823 int err = 0;
1822 int cpu = raw_smp_processor_id(); 1824 int cpu = raw_smp_processor_id();
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index e14a23281707..27dad2967387 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -131,6 +131,7 @@ cond_syscall(sys_io_destroy);
131cond_syscall(sys_io_submit); 131cond_syscall(sys_io_submit);
132cond_syscall(sys_io_cancel); 132cond_syscall(sys_io_cancel);
133cond_syscall(sys_io_getevents); 133cond_syscall(sys_io_getevents);
134cond_syscall(sys_syslog);
134 135
135/* arch-specific weak syscall entries */ 136/* arch-specific weak syscall entries */
136cond_syscall(sys_pciconfig_read); 137cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 89d74436318c..790f9d785663 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -144,6 +144,7 @@ extern int acct_parm[];
144 144
145#ifdef CONFIG_IA64 145#ifdef CONFIG_IA64
146extern int no_unaligned_warning; 146extern int no_unaligned_warning;
147extern int unaligned_dump_stack;
147#endif 148#endif
148 149
149#ifdef CONFIG_RT_MUTEXES 150#ifdef CONFIG_RT_MUTEXES
@@ -781,6 +782,14 @@ static struct ctl_table kern_table[] = {
781 .mode = 0644, 782 .mode = 0644,
782 .proc_handler = &proc_dointvec, 783 .proc_handler = &proc_dointvec,
783 }, 784 },
785 {
786 .ctl_name = CTL_UNNUMBERED,
787 .procname = "unaligned-dump-stack",
788 .data = &unaligned_dump_stack,
789 .maxlen = sizeof (int),
790 .mode = 0644,
791 .proc_handler = &proc_dointvec,
792 },
784#endif 793#endif
785#ifdef CONFIG_DETECT_SOFTLOCKUP 794#ifdef CONFIG_DETECT_SOFTLOCKUP
786 { 795 {
@@ -800,7 +809,7 @@ static struct ctl_table kern_table[] = {
800 .data = &softlockup_thresh, 809 .data = &softlockup_thresh,
801 .maxlen = sizeof(int), 810 .maxlen = sizeof(int),
802 .mode = 0644, 811 .mode = 0644,
803 .proc_handler = &proc_dointvec_minmax, 812 .proc_handler = &proc_dosoftlockup_thresh,
804 .strategy = &sysctl_intvec, 813 .strategy = &sysctl_intvec,
805 .extra1 = &neg_one, 814 .extra1 = &neg_one,
806 .extra2 = &sixty, 815 .extra2 = &sixty,
@@ -1688,7 +1697,7 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol
1688 return error; 1697 return error;
1689} 1698}
1690 1699
1691asmlinkage long sys_sysctl(struct __sysctl_args __user *args) 1700SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
1692{ 1701{
1693 struct __sysctl_args tmp; 1702 struct __sysctl_args tmp;
1694 int error; 1703 int error;
@@ -2989,7 +2998,7 @@ int sysctl_ms_jiffies(struct ctl_table *table,
2989#else /* CONFIG_SYSCTL_SYSCALL */ 2998#else /* CONFIG_SYSCTL_SYSCALL */
2990 2999
2991 3000
2992asmlinkage long sys_sysctl(struct __sysctl_args __user *args) 3001SYSCALL_DEFINE1(sysctl, struct __sysctl_args __user *, args)
2993{ 3002{
2994 struct __sysctl_args tmp; 3003 struct __sysctl_args tmp;
2995 int error; 3004 int error;
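
The new unaligned-dump-stack entry (IA-64 only) appears under /proc/sys/kernel/ with mode 0644. A hedged userspace sketch of flipping it on; the path is inferred from the procname in the hunk above, and the write needs root.

#include <stdio.h>

int main(void)
{
        const char *path = "/proc/sys/kernel/unaligned-dump-stack";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("1\n", f);        /* dump a stack trace on unaligned access */
        fclose(f);
        return 0;
}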
diff --git a/kernel/time.c b/kernel/time.c
index 4886e3ce83a4..29511943871a 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(sys_tz);
60 * why not move it into the appropriate arch directory (for those 60 * why not move it into the appropriate arch directory (for those
61 * architectures that need it). 61 * architectures that need it).
62 */ 62 */
63asmlinkage long sys_time(time_t __user * tloc) 63SYSCALL_DEFINE1(time, time_t __user *, tloc)
64{ 64{
65 time_t i = get_seconds(); 65 time_t i = get_seconds();
66 66
@@ -79,7 +79,7 @@ asmlinkage long sys_time(time_t __user * tloc)
79 * architectures that need it). 79 * architectures that need it).
80 */ 80 */
81 81
82asmlinkage long sys_stime(time_t __user *tptr) 82SYSCALL_DEFINE1(stime, time_t __user *, tptr)
83{ 83{
84 struct timespec tv; 84 struct timespec tv;
85 int err; 85 int err;
@@ -99,8 +99,8 @@ asmlinkage long sys_stime(time_t __user *tptr)
99 99
100#endif /* __ARCH_WANT_SYS_TIME */ 100#endif /* __ARCH_WANT_SYS_TIME */
101 101
102asmlinkage long sys_gettimeofday(struct timeval __user *tv, 102SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
103 struct timezone __user *tz) 103 struct timezone __user *, tz)
104{ 104{
105 if (likely(tv != NULL)) { 105 if (likely(tv != NULL)) {
106 struct timeval ktv; 106 struct timeval ktv;
@@ -184,8 +184,8 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
184 return 0; 184 return 0;
185} 185}
186 186
187asmlinkage long sys_settimeofday(struct timeval __user *tv, 187SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
188 struct timezone __user *tz) 188 struct timezone __user *, tz)
189{ 189{
190 struct timeval user_tv; 190 struct timeval user_tv;
191 struct timespec new_ts; 191 struct timespec new_ts;
@@ -205,7 +205,7 @@ asmlinkage long sys_settimeofday(struct timeval __user *tv,
205 return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL); 205 return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
206} 206}
207 207
208asmlinkage long sys_adjtimex(struct timex __user *txc_p) 208SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
209{ 209{
210 struct timex txc; /* Local copy of parameter */ 210 struct timex txc; /* Local copy of parameter */
211 int ret; 211 int ret;
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 63e05d423a09..21a5ca849514 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -274,6 +274,21 @@ out_bc:
274} 274}
275 275
276/* 276/*
277 * Transfer the do_timer job away from a dying cpu.
278 *
279 * Called with interrupts disabled.
280 */
281static void tick_handover_do_timer(int *cpup)
282{
283 if (*cpup == tick_do_timer_cpu) {
284 int cpu = cpumask_first(cpu_online_mask);
285
286 tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
287 TICK_DO_TIMER_NONE;
288 }
289}
290
291/*
277 * Shutdown an event device on a given cpu: 292 * Shutdown an event device on a given cpu:
278 * 293 *
279 * This is called on a live CPU, when a CPU is dead. So we cannot 294 * This is called on a live CPU, when a CPU is dead. So we cannot
@@ -297,13 +312,6 @@ static void tick_shutdown(unsigned int *cpup)
297 clockevents_exchange_device(dev, NULL); 312 clockevents_exchange_device(dev, NULL);
298 td->evtdev = NULL; 313 td->evtdev = NULL;
299 } 314 }
300 /* Transfer the do_timer job away from this cpu */
301 if (*cpup == tick_do_timer_cpu) {
302 int cpu = cpumask_first(cpu_online_mask);
303
304 tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
305 TICK_DO_TIMER_NONE;
306 }
307 spin_unlock_irqrestore(&tick_device_lock, flags); 315 spin_unlock_irqrestore(&tick_device_lock, flags);
308} 316}
309 317
@@ -357,6 +365,10 @@ static int tick_notify(struct notifier_block *nb, unsigned long reason,
357 tick_broadcast_oneshot_control(reason); 365 tick_broadcast_oneshot_control(reason);
358 break; 366 break;
359 367
368 case CLOCK_EVT_NOTIFY_CPU_DYING:
369 tick_handover_do_timer(dev);
370 break;
371
360 case CLOCK_EVT_NOTIFY_CPU_DEAD: 372 case CLOCK_EVT_NOTIFY_CPU_DEAD:
361 tick_shutdown_broadcast_oneshot(dev); 373 tick_shutdown_broadcast_oneshot(dev);
362 tick_shutdown_broadcast(dev); 374 tick_shutdown_broadcast(dev);
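
tick_handover_do_timer() picks the first remaining online CPU via cpumask_first(), falling back to TICK_DO_TIMER_NONE when none is left. A userspace analogue over a 64-bit mask; the sketch masks out the dying CPU explicitly, whereas the kernel relies on the dying CPU already having been cleared from cpu_online_mask by the time CPU_DYING notifiers run.

#include <stdio.h>
#include <stdint.h>

#define NR_CPUS                 64
#define TICK_DO_TIMER_NONE      (-1)

/* Analogue of cpumask_first(): index of the lowest set bit,
 * or NR_CPUS when the mask is empty. */
static int cpumask_first64(uint64_t mask)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++)
                if (mask & (1ULL << cpu))
                        return cpu;
        return NR_CPUS;
}

static int handover_do_timer(uint64_t online, int dying_cpu, int do_timer_cpu)
{
        if (dying_cpu != do_timer_cpu)
                return do_timer_cpu;    /* nothing to hand over */

        int cpu = cpumask_first64(online & ~(1ULL << dying_cpu));
        return cpu < NR_CPUS ? cpu : TICK_DO_TIMER_NONE;
}

int main(void)
{
        /* CPUs 0 and 2 online; CPU 0, the do_timer owner, is dying. */
        printf("new do_timer cpu: %d\n", handover_do_timer(0x5, 0, 0)); /* 2 */
        return 0;
}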
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1b6c05bd0d0a..d3f1ef4d5cbe 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -134,7 +134,7 @@ __setup("nohz=", setup_tick_nohz);
134 * value. We do this unconditionally on any cpu, as we don't know whether the 134 * value. We do this unconditionally on any cpu, as we don't know whether the
135 * cpu which has the update task assigned is in a long sleep. 135 * cpu which has the update task assigned is in a long sleep.
136 */ 136 */
137void tick_nohz_update_jiffies(void) 137static void tick_nohz_update_jiffies(void)
138{ 138{
139 int cpu = smp_processor_id(); 139 int cpu = smp_processor_id();
140 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 140 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
diff --git a/kernel/timer.c b/kernel/timer.c
index dee3f641a7a7..13dd64fe143d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1129,7 +1129,7 @@ void do_timer(unsigned long ticks)
1129 * For backwards compatibility? This can be done in libc so Alpha 1129 * For backwards compatibility? This can be done in libc so Alpha
1130 * and all newer ports shouldn't need it. 1130 * and all newer ports shouldn't need it.
1131 */ 1131 */
1132asmlinkage unsigned long sys_alarm(unsigned int seconds) 1132SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1133{ 1133{
1134 return alarm_setitimer(seconds); 1134 return alarm_setitimer(seconds);
1135} 1135}
@@ -1152,7 +1152,7 @@ asmlinkage unsigned long sys_alarm(unsigned int seconds)
1152 * 1152 *
1153 * This is SMP safe as current->tgid does not change. 1153 * This is SMP safe as current->tgid does not change.
1154 */ 1154 */
1155asmlinkage long sys_getpid(void) 1155SYSCALL_DEFINE0(getpid)
1156{ 1156{
1157 return task_tgid_vnr(current); 1157 return task_tgid_vnr(current);
1158} 1158}
@@ -1163,7 +1163,7 @@ asmlinkage long sys_getpid(void)
1163 * value of ->real_parent under rcu_read_lock(), see 1163 * value of ->real_parent under rcu_read_lock(), see
1164 * release_task()->call_rcu(delayed_put_task_struct). 1164 * release_task()->call_rcu(delayed_put_task_struct).
1165 */ 1165 */
1166asmlinkage long sys_getppid(void) 1166SYSCALL_DEFINE0(getppid)
1167{ 1167{
1168 int pid; 1168 int pid;
1169 1169
@@ -1174,25 +1174,25 @@ asmlinkage long sys_getppid(void)
1174 return pid; 1174 return pid;
1175} 1175}
1176 1176
1177asmlinkage long sys_getuid(void) 1177SYSCALL_DEFINE0(getuid)
1178{ 1178{
1179 /* Only we change this so SMP safe */ 1179 /* Only we change this so SMP safe */
1180 return current_uid(); 1180 return current_uid();
1181} 1181}
1182 1182
1183asmlinkage long sys_geteuid(void) 1183SYSCALL_DEFINE0(geteuid)
1184{ 1184{
1185 /* Only we change this so SMP safe */ 1185 /* Only we change this so SMP safe */
1186 return current_euid(); 1186 return current_euid();
1187} 1187}
1188 1188
1189asmlinkage long sys_getgid(void) 1189SYSCALL_DEFINE0(getgid)
1190{ 1190{
1191 /* Only we change this so SMP safe */ 1191 /* Only we change this so SMP safe */
1192 return current_gid(); 1192 return current_gid();
1193} 1193}
1194 1194
1195asmlinkage long sys_getegid(void) 1195SYSCALL_DEFINE0(getegid)
1196{ 1196{
1197 /* Only we change this so SMP safe */ 1197 /* Only we change this so SMP safe */
1198 return current_egid(); 1198 return current_egid();
@@ -1308,7 +1308,7 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1308EXPORT_SYMBOL(schedule_timeout_uninterruptible); 1308EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1309 1309
1310/* Thread ID - the internal kernel "pid" */ 1310/* Thread ID - the internal kernel "pid" */
1311asmlinkage long sys_gettid(void) 1311SYSCALL_DEFINE0(gettid)
1312{ 1312{
1313 return task_pid_vnr(current); 1313 return task_pid_vnr(current);
1314} 1314}
@@ -1400,7 +1400,7 @@ out:
1400 return 0; 1400 return 0;
1401} 1401}
1402 1402
1403asmlinkage long sys_sysinfo(struct sysinfo __user *info) 1403SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
1404{ 1404{
1405 struct sysinfo val; 1405 struct sysinfo val;
1406 1406
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2f32969c09df..7dcf6e9f2b04 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -17,6 +17,7 @@
17#include <linux/clocksource.h> 17#include <linux/clocksource.h>
18#include <linux/kallsyms.h> 18#include <linux/kallsyms.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/suspend.h>
20#include <linux/debugfs.h> 21#include <linux/debugfs.h>
21#include <linux/hardirq.h> 22#include <linux/hardirq.h>
22#include <linux/kthread.h> 23#include <linux/kthread.h>
@@ -1965,6 +1966,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
1965#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1966#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1966 1967
1967static atomic_t ftrace_graph_active; 1968static atomic_t ftrace_graph_active;
1969static struct notifier_block ftrace_suspend_notifier;
1968 1970
1969int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace) 1971int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
1970{ 1972{
@@ -2043,6 +2045,27 @@ static int start_graph_tracing(void)
2043 return ret; 2045 return ret;
2044} 2046}
2045 2047
2048/*
2049 * Hibernation protection.
2050 * The state of the current task is too unstable during
2051 * suspend/restore to disk. We want to protect against that.
2052 */
2053static int
2054ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2055 void *unused)
2056{
2057 switch (state) {
2058 case PM_HIBERNATION_PREPARE:
2059 pause_graph_tracing();
2060 break;
2061
2062 case PM_POST_HIBERNATION:
2063 unpause_graph_tracing();
2064 break;
2065 }
2066 return NOTIFY_DONE;
2067}
2068
2046int register_ftrace_graph(trace_func_graph_ret_t retfunc, 2069int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2047 trace_func_graph_ent_t entryfunc) 2070 trace_func_graph_ent_t entryfunc)
2048{ 2071{
@@ -2050,6 +2073,9 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2050 2073
2051 mutex_lock(&ftrace_sysctl_lock); 2074 mutex_lock(&ftrace_sysctl_lock);
2052 2075
2076 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2077 register_pm_notifier(&ftrace_suspend_notifier);
2078
2053 atomic_inc(&ftrace_graph_active); 2079 atomic_inc(&ftrace_graph_active);
2054 ret = start_graph_tracing(); 2080 ret = start_graph_tracing();
2055 if (ret) { 2081 if (ret) {
@@ -2075,6 +2101,7 @@ void unregister_ftrace_graph(void)
2075 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; 2101 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
2076 ftrace_graph_entry = ftrace_graph_entry_stub; 2102 ftrace_graph_entry = ftrace_graph_entry_stub;
2077 ftrace_shutdown(FTRACE_STOP_FUNC_RET); 2103 ftrace_shutdown(FTRACE_STOP_FUNC_RET);
2104 unregister_pm_notifier(&ftrace_suspend_notifier);
2078 2105
2079 mutex_unlock(&ftrace_sysctl_lock); 2106 mutex_unlock(&ftrace_sysctl_lock);
2080} 2107}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 8b0daf0662ef..bd38c5cfd8ad 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -246,7 +246,7 @@ static inline int test_time_stamp(u64 delta)
246 return 0; 246 return 0;
247} 247}
248 248
249#define BUF_PAGE_SIZE (PAGE_SIZE - sizeof(struct buffer_data_page)) 249#define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data))
250 250
251/* 251/*
252 * head_page == tail_page && head == tail then buffer is empty. 252 * head_page == tail_page && head == tail then buffer is empty.
@@ -1025,12 +1025,8 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1025 } 1025 }
1026 1026
1027 if (next_page == head_page) { 1027 if (next_page == head_page) {
1028 if (!(buffer->flags & RB_FL_OVERWRITE)) { 1028 if (!(buffer->flags & RB_FL_OVERWRITE))
1029 /* reset write */
1030 if (tail <= BUF_PAGE_SIZE)
1031 local_set(&tail_page->write, tail);
1032 goto out_unlock; 1029 goto out_unlock;
1033 }
1034 1030
1035 /* tail_page has not moved yet? */ 1031 /* tail_page has not moved yet? */
1036 if (tail_page == cpu_buffer->tail_page) { 1032 if (tail_page == cpu_buffer->tail_page) {
@@ -1105,6 +1101,10 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
1105 return event; 1101 return event;
1106 1102
1107 out_unlock: 1103 out_unlock:
1104 /* reset write */
1105 if (tail <= BUF_PAGE_SIZE)
1106 local_set(&tail_page->write, tail);
1107
1108 __raw_spin_unlock(&cpu_buffer->lock); 1108 __raw_spin_unlock(&cpu_buffer->lock);
1109 local_irq_restore(flags); 1109 local_irq_restore(flags);
1110 return NULL; 1110 return NULL;
@@ -2174,6 +2174,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2174 2174
2175 cpu_buffer->overrun = 0; 2175 cpu_buffer->overrun = 0;
2176 cpu_buffer->entries = 0; 2176 cpu_buffer->entries = 0;
2177
2178 cpu_buffer->write_stamp = 0;
2179 cpu_buffer->read_stamp = 0;
2177} 2180}
2178 2181
2179/** 2182/**
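
BUF_PAGE_SIZE now subtracts offsetof(struct buffer_data_page, data) instead of sizeof(): with a flexible trailing array, sizeof() includes tail padding after the header fields and so under-counts the usable payload. (The same hunk also moves the "reset write" fixup to out_unlock so it runs on every failure path, not just the !RB_FL_OVERWRITE one.) A runnable sketch with a simplified analogue of the struct layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for buffer_data_page: an 8-byte-aligned header
 * followed by a flexible payload array. */
struct data_page {
        uint64_t time_stamp;
        uint32_t commit;
        unsigned char data[];   /* payload starts here */
};

int main(void)
{
        /* sizeof() rounds the struct up to its alignment (16), while
         * offsetof(data) is the true header size (12): offsetof
         * recovers the padding bytes for the payload. */
        printf("sizeof   = %zu\n", sizeof(struct data_page));
        printf("offsetof = %zu\n", offsetof(struct data_page, data));
        return 0;
}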
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c580233add95..17bb88d86ac2 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -40,7 +40,7 @@
40 40
41#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) 41#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
42 42
43unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 43unsigned long __read_mostly tracing_max_latency;
44unsigned long __read_mostly tracing_thresh; 44unsigned long __read_mostly tracing_thresh;
45 45
46/* 46/*
@@ -3736,7 +3736,7 @@ static struct notifier_block trace_die_notifier = {
3736 * it if we decide to change what log level the ftrace dump 3736 * it if we decide to change what log level the ftrace dump
3737 * should be at. 3737 * should be at.
3738 */ 3738 */
3739#define KERN_TRACE KERN_INFO 3739#define KERN_TRACE KERN_EMERG
3740 3740
3741static void 3741static void
3742trace_printk_seq(struct trace_seq *s) 3742trace_printk_seq(struct trace_seq *s)
@@ -3770,6 +3770,7 @@ void ftrace_dump(void)
3770 dump_ran = 1; 3770 dump_ran = 1;
3771 3771
3772 /* No turning back! */ 3772 /* No turning back! */
3773 tracing_off();
3773 ftrace_kill(); 3774 ftrace_kill();
3774 3775
3775 for_each_tracing_cpu(cpu) { 3776 for_each_tracing_cpu(cpu) {
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 7c2e326bbc8b..62a78d943534 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -380,6 +380,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr)
380 380
381static void __irqsoff_tracer_init(struct trace_array *tr) 381static void __irqsoff_tracer_init(struct trace_array *tr)
382{ 382{
383 tracing_max_latency = 0;
383 irqsoff_trace = tr; 384 irqsoff_trace = tr;
384 /* make sure that the tracer is visible */ 385 /* make sure that the tracer is visible */
385 smp_wmb(); 386 smp_wmb();
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 43586b689e31..42ae1e77b6b3 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -333,6 +333,7 @@ static void stop_wakeup_tracer(struct trace_array *tr)
333 333
334static int wakeup_tracer_init(struct trace_array *tr) 334static int wakeup_tracer_init(struct trace_array *tr)
335{ 335{
336 tracing_max_latency = 0;
336 wakeup_trace = tr; 337 wakeup_trace = tr;
337 start_wakeup_tracer(tr); 338 start_wakeup_tracer(tr);
338 return 0; 339 return 0;
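
trace.c stops pre-loading tracing_max_latency with ULONG_MAX, and the irqsoff and wakeup tracers now zero it in their init functions instead; with ULONG_MAX preloaded, no measured delta could ever exceed the "maximum" and nothing was recorded. A runnable sketch modelled on the report_latency() check in the latency tracers:

#include <stdio.h>
#include <limits.h>

static unsigned long tracing_max_latency;       /* now starts at 0, reset per tracer init */

/* A snapshot is taken only when the new delta beats the running max. */
static int report_latency(unsigned long delta)
{
        if (delta <= tracing_max_latency)
                return 0;               /* with ULONG_MAX preloaded, always taken */
        tracing_max_latency = delta;
        return 1;                       /* record a new max snapshot */
}

int main(void)
{
        printf("first sample recorded: %d\n", report_latency(100));     /* 1 */
        tracing_max_latency = ULONG_MAX;        /* the old initial value */
        printf("after ULONG_MAX preload: %d\n", report_latency(100));   /* 0 */
        return 0;
}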
diff --git a/kernel/uid16.c b/kernel/uid16.c
index 2460c3199b5a..0314501688b9 100644
--- a/kernel/uid16.c
+++ b/kernel/uid16.c
@@ -17,7 +17,7 @@
17 17
18#include <asm/uaccess.h> 18#include <asm/uaccess.h>
19 19
20asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gid_t group) 20SYSCALL_DEFINE3(chown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
21{ 21{
22 long ret = sys_chown(filename, low2highuid(user), low2highgid(group)); 22 long ret = sys_chown(filename, low2highuid(user), low2highgid(group));
23 /* avoid REGPARM breakage on x86: */ 23 /* avoid REGPARM breakage on x86: */
@@ -25,7 +25,7 @@ asmlinkage long sys_chown16(const char __user * filename, old_uid_t user, old_gi
25 return ret; 25 return ret;
26} 26}
27 27
28asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_gid_t group) 28SYSCALL_DEFINE3(lchown16, const char __user *, filename, old_uid_t, user, old_gid_t, group)
29{ 29{
30 long ret = sys_lchown(filename, low2highuid(user), low2highgid(group)); 30 long ret = sys_lchown(filename, low2highuid(user), low2highgid(group));
31 /* avoid REGPARM breakage on x86: */ 31 /* avoid REGPARM breakage on x86: */
@@ -33,7 +33,7 @@ asmlinkage long sys_lchown16(const char __user * filename, old_uid_t user, old_g
33 return ret; 33 return ret;
34} 34}
35 35
36asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group) 36SYSCALL_DEFINE3(fchown16, unsigned int, fd, old_uid_t, user, old_gid_t, group)
37{ 37{
38 long ret = sys_fchown(fd, low2highuid(user), low2highgid(group)); 38 long ret = sys_fchown(fd, low2highuid(user), low2highgid(group));
39 /* avoid REGPARM breakage on x86: */ 39 /* avoid REGPARM breakage on x86: */
@@ -41,7 +41,7 @@ asmlinkage long sys_fchown16(unsigned int fd, old_uid_t user, old_gid_t group)
41 return ret; 41 return ret;
42} 42}
43 43
44asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid) 44SYSCALL_DEFINE2(setregid16, old_gid_t, rgid, old_gid_t, egid)
45{ 45{
46 long ret = sys_setregid(low2highgid(rgid), low2highgid(egid)); 46 long ret = sys_setregid(low2highgid(rgid), low2highgid(egid));
47 /* avoid REGPARM breakage on x86: */ 47 /* avoid REGPARM breakage on x86: */
@@ -49,7 +49,7 @@ asmlinkage long sys_setregid16(old_gid_t rgid, old_gid_t egid)
49 return ret; 49 return ret;
50} 50}
51 51
52asmlinkage long sys_setgid16(old_gid_t gid) 52SYSCALL_DEFINE1(setgid16, old_gid_t, gid)
53{ 53{
54 long ret = sys_setgid(low2highgid(gid)); 54 long ret = sys_setgid(low2highgid(gid));
55 /* avoid REGPARM breakage on x86: */ 55 /* avoid REGPARM breakage on x86: */
@@ -57,7 +57,7 @@ asmlinkage long sys_setgid16(old_gid_t gid)
57 return ret; 57 return ret;
58} 58}
59 59
60asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid) 60SYSCALL_DEFINE2(setreuid16, old_uid_t, ruid, old_uid_t, euid)
61{ 61{
62 long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid)); 62 long ret = sys_setreuid(low2highuid(ruid), low2highuid(euid));
63 /* avoid REGPARM breakage on x86: */ 63 /* avoid REGPARM breakage on x86: */
@@ -65,7 +65,7 @@ asmlinkage long sys_setreuid16(old_uid_t ruid, old_uid_t euid)
65 return ret; 65 return ret;
66} 66}
67 67
68asmlinkage long sys_setuid16(old_uid_t uid) 68SYSCALL_DEFINE1(setuid16, old_uid_t, uid)
69{ 69{
70 long ret = sys_setuid(low2highuid(uid)); 70 long ret = sys_setuid(low2highuid(uid));
71 /* avoid REGPARM breakage on x86: */ 71 /* avoid REGPARM breakage on x86: */
@@ -73,7 +73,7 @@ asmlinkage long sys_setuid16(old_uid_t uid)
73 return ret; 73 return ret;
74} 74}
75 75
76asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid) 76SYSCALL_DEFINE3(setresuid16, old_uid_t, ruid, old_uid_t, euid, old_uid_t, suid)
77{ 77{
78 long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid), 78 long ret = sys_setresuid(low2highuid(ruid), low2highuid(euid),
79 low2highuid(suid)); 79 low2highuid(suid));
@@ -82,7 +82,7 @@ asmlinkage long sys_setresuid16(old_uid_t ruid, old_uid_t euid, old_uid_t suid)
82 return ret; 82 return ret;
83} 83}
84 84
85asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid, old_uid_t __user *suid) 85SYSCALL_DEFINE3(getresuid16, old_uid_t __user *, ruid, old_uid_t __user *, euid, old_uid_t __user *, suid)
86{ 86{
87 const struct cred *cred = current_cred(); 87 const struct cred *cred = current_cred();
88 int retval; 88 int retval;
@@ -94,7 +94,7 @@ asmlinkage long sys_getresuid16(old_uid_t __user *ruid, old_uid_t __user *euid,
94 return retval; 94 return retval;
95} 95}
96 96
97asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid) 97SYSCALL_DEFINE3(setresgid16, old_gid_t, rgid, old_gid_t, egid, old_gid_t, sgid)
98{ 98{
99 long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid), 99 long ret = sys_setresgid(low2highgid(rgid), low2highgid(egid),
100 low2highgid(sgid)); 100 low2highgid(sgid));
@@ -103,7 +103,8 @@ asmlinkage long sys_setresgid16(old_gid_t rgid, old_gid_t egid, old_gid_t sgid)
103 return ret; 103 return ret;
104} 104}
105 105
106asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid, old_gid_t __user *sgid) 106
107SYSCALL_DEFINE3(getresgid16, old_gid_t __user *, rgid, old_gid_t __user *, egid, old_gid_t __user *, sgid)
107{ 108{
108 const struct cred *cred = current_cred(); 109 const struct cred *cred = current_cred();
109 int retval; 110 int retval;
@@ -115,7 +116,7 @@ asmlinkage long sys_getresgid16(old_gid_t __user *rgid, old_gid_t __user *egid,
115 return retval; 116 return retval;
116} 117}
117 118
118asmlinkage long sys_setfsuid16(old_uid_t uid) 119SYSCALL_DEFINE1(setfsuid16, old_uid_t, uid)
119{ 120{
120 long ret = sys_setfsuid(low2highuid(uid)); 121 long ret = sys_setfsuid(low2highuid(uid));
121 /* avoid REGPARM breakage on x86: */ 122 /* avoid REGPARM breakage on x86: */
@@ -123,7 +124,7 @@ asmlinkage long sys_setfsuid16(old_uid_t uid)
123 return ret; 124 return ret;
124} 125}
125 126
126asmlinkage long sys_setfsgid16(old_gid_t gid) 127SYSCALL_DEFINE1(setfsgid16, old_gid_t, gid)
127{ 128{
128 long ret = sys_setfsgid(low2highgid(gid)); 129 long ret = sys_setfsgid(low2highgid(gid));
129 /* avoid REGPARM breakage on x86: */ 130 /* avoid REGPARM breakage on x86: */
@@ -161,7 +162,7 @@ static int groups16_from_user(struct group_info *group_info,
161 return 0; 162 return 0;
162} 163}
163 164
164asmlinkage long sys_getgroups16(int gidsetsize, old_gid_t __user *grouplist) 165SYSCALL_DEFINE2(getgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
165{ 166{
166 const struct cred *cred = current_cred(); 167 const struct cred *cred = current_cred();
167 int i; 168 int i;
@@ -184,7 +185,7 @@ out:
184 return i; 185 return i;
185} 186}
186 187
187asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist) 188SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
188{ 189{
189 struct group_info *group_info; 190 struct group_info *group_info;
190 int retval; 191 int retval;
@@ -209,22 +210,22 @@ asmlinkage long sys_setgroups16(int gidsetsize, old_gid_t __user *grouplist)
209 return retval; 210 return retval;
210} 211}
211 212
212asmlinkage long sys_getuid16(void) 213SYSCALL_DEFINE0(getuid16)
213{ 214{
214 return high2lowuid(current_uid()); 215 return high2lowuid(current_uid());
215} 216}
216 217
217asmlinkage long sys_geteuid16(void) 218SYSCALL_DEFINE0(geteuid16)
218{ 219{
219 return high2lowuid(current_euid()); 220 return high2lowuid(current_euid());
220} 221}
221 222
222asmlinkage long sys_getgid16(void) 223SYSCALL_DEFINE0(getgid16)
223{ 224{
224 return high2lowgid(current_gid()); 225 return high2lowgid(current_gid());
225} 226}
226 227
227asmlinkage long sys_getegid16(void) 228SYSCALL_DEFINE0(getegid16)
228{ 229{
229 return high2lowgid(current_egid()); 230 return high2lowgid(current_egid());
230} 231}
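
The 16-bit wrappers above all funnel through low2high/high2low conversions, whose key job is preserving the (old_uid_t)-1 "leave this id alone" sentinel across the width change. A self-contained sketch of that convention; the macro body is reconstructed from the convention, not copied from the header.

#include <stdio.h>
#include <stdint.h>

typedef uint16_t old_uid_t;
typedef uint32_t new_uid_t;     /* stand-in to avoid clashing with libc's uid_t */

/* Widening 16->32 bits must keep -1 meaning "no change" at the wider
 * type, rather than becoming the literal uid 65535. */
#define low2highuid(uid) \
        ((uid) == (old_uid_t)-1 ? (new_uid_t)-1 : (new_uid_t)(uid))

int main(void)
{
        printf("%u\n", low2highuid((old_uid_t)1000));   /* 1000 */
        printf("%u\n", low2highuid((old_uid_t)-1));     /* 4294967295, not 65535 */
        return 0;
}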
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2f445833ae37..1f0c509b40d3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -971,6 +971,8 @@ undo:
971} 971}
972 972
973#ifdef CONFIG_SMP 973#ifdef CONFIG_SMP
974static struct workqueue_struct *work_on_cpu_wq __read_mostly;
975
974struct work_for_cpu { 976struct work_for_cpu {
975 struct work_struct work; 977 struct work_struct work;
976 long (*fn)(void *); 978 long (*fn)(void *);
@@ -991,8 +993,8 @@ static void do_work_for_cpu(struct work_struct *w)
991 * @fn: the function to run 993 * @fn: the function to run
992 * @arg: the function arg 994 * @arg: the function arg
993 * 995 *
994 * This will return -EINVAL in the cpu is not online, or the return value 996 * This will return the value @fn returns.
995 * of @fn otherwise. 997 * It is up to the caller to ensure that the cpu doesn't go offline.
996 */ 998 */
997long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) 999long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
998{ 1000{
@@ -1001,14 +1003,8 @@ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
1001 INIT_WORK(&wfc.work, do_work_for_cpu); 1003 INIT_WORK(&wfc.work, do_work_for_cpu);
1002 wfc.fn = fn; 1004 wfc.fn = fn;
1003 wfc.arg = arg; 1005 wfc.arg = arg;
1004 get_online_cpus(); 1006 queue_work_on(cpu, work_on_cpu_wq, &wfc.work);
1005 if (unlikely(!cpu_online(cpu))) 1007 flush_work(&wfc.work);
1006 wfc.ret = -EINVAL;
1007 else {
1008 schedule_work_on(cpu, &wfc.work);
1009 flush_work(&wfc.work);
1010 }
1011 put_online_cpus();
1012 1008
1013 return wfc.ret; 1009 return wfc.ret;
1014} 1010}
@@ -1025,4 +1021,8 @@ void __init init_workqueues(void)
1025 hotcpu_notifier(workqueue_cpu_callback, 0); 1021 hotcpu_notifier(workqueue_cpu_callback, 0);
1026 keventd_wq = create_workqueue("events"); 1022 keventd_wq = create_workqueue("events");
1027 BUG_ON(!keventd_wq); 1023 BUG_ON(!keventd_wq);
1024#ifdef CONFIG_SMP
1025 work_on_cpu_wq = create_workqueue("work_on_cpu");
1026 BUG_ON(!work_on_cpu_wq);
1027#endif
1028} 1028}
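
work_on_cpu() now queues on a dedicated workqueue and no longer takes the hotplug lock or checks cpu_online() itself; per the updated kerneldoc, that responsibility moves to the caller. A kernel-context sketch of the new calling convention (not a standalone program; my_fn and run_on_cpu3 are hypothetical names):

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static long my_fn(void *arg)            /* hypothetical per-cpu payload */
{
        return 0;
}

/* Under the new contract the caller pins hotplug and checks
 * onlineness; work_on_cpu() just queues and flushes. */
static long run_on_cpu3(void)
{
        long ret = -ENODEV;

        get_online_cpus();
        if (cpu_online(3))
                ret = work_on_cpu(3, my_fn, NULL);
        put_online_cpus();
        return ret;
}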