Diffstat (limited to 'kernel')
-rw-r--r--  kernel/acct.c            |   2
-rw-r--r--  kernel/auditsc.c         |   9
-rw-r--r--  kernel/cgroup.c          |   4
-rw-r--r--  kernel/compat.c          |  58
-rw-r--r--  kernel/dma-coherent.c    |   2
-rw-r--r--  kernel/dma.c             |   2
-rw-r--r--  kernel/exit.c            |   9
-rw-r--r--  kernel/fork.c            |   5
-rw-r--r--  kernel/irq/manage.c      |   9
-rw-r--r--  kernel/kallsyms.c        |   1
-rw-r--r--  kernel/kmod.c            |  67
-rw-r--r--  kernel/kprobes.c         |   2
-rw-r--r--  kernel/ksysfs.c          |  35
-rw-r--r--  kernel/module.c          |  45
-rw-r--r--  kernel/panic.c           |  65
-rw-r--r--  kernel/power/disk.c      |  11
-rw-r--r--  kernel/power/main.c      |   7
-rw-r--r--  kernel/power/user.c      |  10
-rw-r--r--  kernel/printk.c          |  42
-rw-r--r--  kernel/profile.c         |  41
-rw-r--r--  kernel/resource.c        | 150
-rw-r--r--  kernel/sched_debug.c     |   2
-rw-r--r--  kernel/softirq.c         |  13
-rw-r--r--  kernel/softlockup.c      |   2
-rw-r--r--  kernel/sys.c             |  38
-rw-r--r--  kernel/sys_ni.c          |   6
-rw-r--r--  kernel/sysctl.c          | 123
-rw-r--r--  kernel/time/Kconfig      |   1
-rw-r--r--  kernel/time/tick-sched.c |  13
-rw-r--r--  kernel/timer.c           |   1
-rw-r--r--  kernel/utsname_sysctl.c  |   5
-rw-r--r--  kernel/wait.c            |  14
-rw-r--r--  kernel/workqueue.c       |   2
33 files changed, 551 insertions(+), 245 deletions(-)
diff --git a/kernel/acct.c b/kernel/acct.c
index dd68b9059418..f6006a60df5d 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -548,7 +548,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 #endif
 
 	spin_lock_irq(&current->sighand->siglock);
-	tty = current->signal->tty;
+	tty = current->signal->tty;	/* Safe as we hold the siglock */
 	ac.ac_tty = tty ? old_encode_dev(tty_devnum(tty)) : 0;
 	ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
 	ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 59cedfb040e7..cf5bc2f5f9c3 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -246,8 +246,8 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
 	unsigned n;
 	if (unlikely(!ctx))
 		return 0;
-
 	n = ctx->major;
+
 	switch (audit_classify_syscall(ctx->arch, n)) {
 	case 0:	/* native */
 		if ((mask & AUDIT_PERM_WRITE) &&
@@ -1204,13 +1204,13 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 		 (context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
 		 context->return_code);
 
-	mutex_lock(&tty_mutex);
-	read_lock(&tasklist_lock);
+	spin_lock_irq(&tsk->sighand->siglock);
 	if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
 		tty = tsk->signal->tty->name;
 	else
 		tty = "(none)";
-	read_unlock(&tasklist_lock);
+	spin_unlock_irq(&tsk->sighand->siglock);
+
 	audit_log_format(ab,
 		  " a0=%lx a1=%lx a2=%lx a3=%lx items=%d"
 		  " ppid=%d pid=%d auid=%u uid=%u gid=%u"
@@ -1230,7 +1230,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 		  context->egid, context->sgid, context->fsgid, tty,
 		  tsk->sessionid);
 
-	mutex_unlock(&tty_mutex);
 
 	audit_log_task_info(ab, tsk);
 	if (context->filterkey) {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0123d75ec9a..8c6e1c17e6d3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2735,6 +2735,8 @@ void cgroup_fork_callbacks(struct task_struct *child)
  * Called on every change to mm->owner. mm_init_owner() does not
  * invoke this routine, since it assigns the mm->owner the first time
  * and does not change it.
+ *
+ * The callbacks are invoked with mmap_sem held in read mode.
  */
 void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 {
@@ -2750,7 +2752,7 @@ void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 			if (oldcgrp == newcgrp)
 				continue;
 			if (ss->mm_owner_changed)
-				ss->mm_owner_changed(ss, oldcgrp, newcgrp);
+				ss->mm_owner_changed(ss, oldcgrp, newcgrp, new);
 		}
 	}
 }
diff --git a/kernel/compat.c b/kernel/compat.c
index 32c254a8ab9a..143990e48cb9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -26,6 +26,64 @@
 
 #include <asm/uaccess.h>
 
+/*
+ * Note that the native side is already converted to a timespec, because
+ * that's what we want anyway.
+ */
+static int compat_get_timeval(struct timespec *o,
+		struct compat_timeval __user *i)
+{
+	long usec;
+
+	if (get_user(o->tv_sec, &i->tv_sec) ||
+	    get_user(usec, &i->tv_usec))
+		return -EFAULT;
+	o->tv_nsec = usec * 1000;
+	return 0;
+}
+
+static int compat_put_timeval(struct compat_timeval __user *o,
+		struct timeval *i)
+{
+	return (put_user(i->tv_sec, &o->tv_sec) ||
+		put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
+}
+
+asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
+		struct timezone __user *tz)
+{
+	if (tv) {
+		struct timeval ktv;
+		do_gettimeofday(&ktv);
+		if (compat_put_timeval(tv, &ktv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+			return -EFAULT;
+	}
+
+	return 0;
+}
+
+asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
+		struct timezone __user *tz)
+{
+	struct timespec kts;
+	struct timezone ktz;
+
+	if (tv) {
+		if (compat_get_timeval(&kts, tv))
+			return -EFAULT;
+	}
+	if (tz) {
+		if (copy_from_user(&ktz, tz, sizeof(ktz)))
+			return -EFAULT;
+	}
+
+	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
+
 int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
 {
 	return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index c1d4d5b4c61c..f013a0c2e111 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -124,6 +124,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	}
 	return (mem != NULL);
 }
+EXPORT_SYMBOL(dma_alloc_from_coherent);
 
 /**
  * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
@@ -151,3 +152,4 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(dma_release_from_coherent);
diff --git a/kernel/dma.c b/kernel/dma.c
index d2c60a822790..f903189c5304 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -1,4 +1,4 @@
-/* $Id: dma.c,v 1.7 1994/12/28 03:35:33 root Exp root $
+/*
  * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
  *
  * Written by Hennus Bergman, 1992.
diff --git a/kernel/exit.c b/kernel/exit.c
index 85a83c831856..0ef4673e351b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -640,24 +640,23 @@ retry:
 assign_new_owner:
 	BUG_ON(c == p);
 	get_task_struct(c);
+	read_unlock(&tasklist_lock);
+	down_write(&mm->mmap_sem);
 	/*
 	 * The task_lock protects c->mm from changing.
 	 * We always want mm->owner->mm == mm
 	 */
 	task_lock(c);
-	/*
-	 * Delay read_unlock() till we have the task_lock()
-	 * to ensure that c does not slip away underneath us
-	 */
-	read_unlock(&tasklist_lock);
 	if (c->mm != mm) {
 		task_unlock(c);
+		up_write(&mm->mmap_sem);
 		put_task_struct(c);
 		goto retry;
 	}
 	cgroup_mm_owner_callbacks(mm->owner, c);
 	mm->owner = c;
 	task_unlock(c);
+	up_write(&mm->mmap_sem);
 	put_task_struct(c);
 }
 #endif /* CONFIG_MM_OWNER */
diff --git a/kernel/fork.c b/kernel/fork.c
index 7ce2ebe84796..30de644a40c4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -802,6 +802,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
 	sig->leader = 0;	/* session leadership doesn't inherit */
 	sig->tty_old_pgrp = NULL;
+	sig->tty = NULL;
 
 	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
 	sig->gtime = cputime_zero;
@@ -838,6 +839,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 void __cleanup_signal(struct signal_struct *sig)
 {
 	exit_thread_group_keys(sig);
+	tty_kref_put(sig->tty);
 	kmem_cache_free(signal_cachep, sig);
 }
 
@@ -1227,7 +1229,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 				p->nsproxy->pid_ns->child_reaper = p;
 
 			p->signal->leader_pid = pid;
-			p->signal->tty = current->signal->tty;
+			tty_kref_put(p->signal->tty);
+			p->signal->tty = tty_kref_get(current->signal->tty);
 			set_task_pgrp(p, task_pgrp_nr(current));
 			set_task_session(p, task_session_nr(current));
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0314074fa232..60c49e324390 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -89,7 +89,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 	set_balance_irq_affinity(irq, cpumask);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	set_pending_irq(irq, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&desc->lock, flags);
+		desc->chip->set_affinity(irq, cpumask);
+		spin_unlock_irqrestore(&desc->lock, flags);
+	} else
+		set_pending_irq(irq, cpumask);
 #else
 	desc->affinity = cpumask;
 	desc->chip->set_affinity(irq, cpumask);
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 38fc10ac7541..5072cf1685a2 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -260,7 +260,6 @@ const char *kallsyms_lookup(unsigned long addr,
 	/* see if it's in a module */
 	return module_address_lookup(addr, symbolsize, offset, modname,
 				     namebuf);
-	return NULL;
 }
 
 int lookup_symbol_name(unsigned long addr, char *symname)
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2456d1a0befb..3d3c3ea3a023 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -113,7 +113,7 @@ int request_module(const char *fmt, ...)
 	return ret;
 }
 EXPORT_SYMBOL(request_module);
-#endif /* CONFIG_KMOD */
+#endif /* CONFIG_MODULES */
 
 struct subprocess_info {
 	struct work_struct work;
@@ -265,7 +265,7 @@ static void __call_usermodehelper(struct work_struct *work)
 	}
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 /*
  * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
  * (used for preventing user land processes from being created after the user
@@ -288,39 +288,37 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
  */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
-static int usermodehelper_pm_callback(struct notifier_block *nfb,
-					unsigned long action,
-					void *ignored)
+/**
+ * usermodehelper_disable - prevent new helpers from being started
+ */
+int usermodehelper_disable(void)
 {
 	long retval;
 
-	switch (action) {
-	case PM_HIBERNATION_PREPARE:
-	case PM_SUSPEND_PREPARE:
-		usermodehelper_disabled = 1;
-		smp_mb();
-		/*
-		 * From now on call_usermodehelper_exec() won't start any new
-		 * helpers, so it is sufficient if running_helpers turns out to
-		 * be zero at one point (it may be increased later, but that
-		 * doesn't matter).
-		 */
-		retval = wait_event_timeout(running_helpers_waitq,
+	usermodehelper_disabled = 1;
+	smp_mb();
+	/*
+	 * From now on call_usermodehelper_exec() won't start any new
+	 * helpers, so it is sufficient if running_helpers turns out to
+	 * be zero at one point (it may be increased later, but that
+	 * doesn't matter).
+	 */
+	retval = wait_event_timeout(running_helpers_waitq,
 					atomic_read(&running_helpers) == 0,
 					RUNNING_HELPERS_TIMEOUT);
-		if (retval) {
-			return NOTIFY_OK;
-		} else {
-			usermodehelper_disabled = 0;
-			return NOTIFY_BAD;
-		}
-	case PM_POST_HIBERNATION:
-	case PM_POST_SUSPEND:
-		usermodehelper_disabled = 0;
-		return NOTIFY_OK;
-	}
+	if (retval)
+		return 0;
 
-	return NOTIFY_DONE;
+	usermodehelper_disabled = 0;
+	return -EAGAIN;
+}
+
+/**
+ * usermodehelper_enable - allow new helpers to be started again
+ */
+void usermodehelper_enable(void)
+{
+	usermodehelper_disabled = 0;
 }
 
 static void helper_lock(void)
@@ -334,18 +332,12 @@ static void helper_unlock(void)
 	if (atomic_dec_and_test(&running_helpers))
 		wake_up(&running_helpers_waitq);
 }
-
-static void register_pm_notifier_callback(void)
-{
-	pm_notifier(usermodehelper_pm_callback, 0);
-}
-#else /* CONFIG_PM */
+#else /* CONFIG_PM_SLEEP */
 #define usermodehelper_disabled	0
 
 static inline void helper_lock(void) {}
 static inline void helper_unlock(void) {}
-static inline void register_pm_notifier_callback(void) {}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
 
 /**
  * call_usermodehelper_setup - prepare to call a usermode helper
@@ -515,5 +507,4 @@ void __init usermodehelper_init(void)
 {
 	khelper_wq = create_singlethread_workqueue("khelper");
 	BUG_ON(!khelper_wq);
-	register_pm_notifier_callback();
 }
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 75bc2cd9ebc6..8b57a2597f21 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -404,7 +404,7 @@ void kretprobe_hash_lock(struct task_struct *tsk,
 	spin_lock_irqsave(hlist_lock, *flags);
 }
 
-void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
+static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_lock_irqsave(hlist_lock, *flags);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index e53bc30e9ba5..08dd8ed86c77 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/kexec.h>
+#include <linux/profile.h>
 #include <linux/sched.h>
 
 #define KERNEL_ATTR_RO(_name) \
@@ -53,6 +54,37 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
 KERNEL_ATTR_RW(uevent_helper);
 #endif
 
+#ifdef CONFIG_PROFILING
+static ssize_t profiling_show(struct kobject *kobj,
+				  struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", prof_on);
+}
+static ssize_t profiling_store(struct kobject *kobj,
+				   struct kobj_attribute *attr,
+				   const char *buf, size_t count)
+{
+	int ret;
+
+	if (prof_on)
+		return -EEXIST;
+	/*
+	 * This eventually calls into get_option() which
+	 * has a ton of callers and is not const. It is
+	 * easiest to cast it away here.
+	 */
+	profile_setup((char *)buf);
+	ret = profile_init();
+	if (ret)
+		return ret;
+	ret = create_proc_profile();
+	if (ret)
+		return ret;
+	return count;
+}
+KERNEL_ATTR_RW(profiling);
+#endif
+
 #ifdef CONFIG_KEXEC
 static ssize_t kexec_loaded_show(struct kobject *kobj,
 				 struct kobj_attribute *attr, char *buf)
@@ -109,6 +141,9 @@ static struct attribute * kernel_attrs[] = {
 	&uevent_seqnum_attr.attr,
 	&uevent_helper_attr.attr,
 #endif
+#ifdef CONFIG_PROFILING
+	&profiling_attr.attr,
+#endif
#ifdef CONFIG_KEXEC
 	&kexec_loaded_attr.attr,
 	&kexec_crash_loaded_attr.attr,
diff --git a/kernel/module.c b/kernel/module.c
index 9db11911e04b..b7205f67cfaf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -100,7 +100,7 @@ static inline int strong_try_module_get(struct module *mod)
 static inline void add_taint_module(struct module *mod, unsigned flag)
 {
 	add_taint(flag);
-	mod->taints |= flag;
+	mod->taints |= (1U << flag);
 }
 
 /*
@@ -784,6 +784,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
 	mutex_lock(&module_mutex);
 	/* Store the name of the last unloaded module for diagnostic purposes */
 	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
+	unregister_dynamic_debug_module(mod->name);
 	free_module(mod);
 
  out:
@@ -923,7 +924,7 @@ static const char vermagic[] = VERMAGIC_STRING;
 static int try_to_force_load(struct module *mod, const char *symname)
 {
 #ifdef CONFIG_MODULE_FORCE_LOAD
-	if (!(tainted & TAINT_FORCED_MODULE))
+	if (!test_taint(TAINT_FORCED_MODULE))
 		printk("%s: no version for \"%s\" found: kernel tainted.\n",
 		       mod->name, symname);
 	add_taint_module(mod, TAINT_FORCED_MODULE);
@@ -1033,7 +1034,7 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
 	const unsigned long *crc;
 
 	ret = find_symbol(name, &owner, &crc,
-			!(mod->taints & TAINT_PROPRIETARY_MODULE), true);
+			!(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
 	if (!IS_ERR_VALUE(ret)) {
 		/* use_module can fail due to OOM,
 		   or module initialization or unloading */
@@ -1173,7 +1174,7 @@ static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
 		while (i-- > 0)
 			sysfs_remove_bin_file(notes_attrs->dir,
 					      &notes_attrs->attrs[i]);
-		kobject_del(notes_attrs->dir);
+		kobject_put(notes_attrs->dir);
 	}
 	kfree(notes_attrs);
 }
@@ -1634,7 +1635,7 @@ static void set_license(struct module *mod, const char *license)
 		license = "unspecified";
 
 	if (!license_is_gpl_compatible(license)) {
-		if (!(tainted & TAINT_PROPRIETARY_MODULE))
+		if (!test_taint(TAINT_PROPRIETARY_MODULE))
 			printk(KERN_WARNING "%s: module license '%s' taints "
 				"kernel.\n", mod->name, license);
 		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
@@ -1783,6 +1784,33 @@ static inline void add_kallsyms(struct module *mod,
 }
 #endif /* CONFIG_KALLSYMS */
 
+#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG
+static void dynamic_printk_setup(Elf_Shdr *sechdrs, unsigned int verboseindex)
+{
+	struct mod_debug *debug_info;
+	unsigned long pos, end;
+	unsigned int num_verbose;
+
+	pos = sechdrs[verboseindex].sh_addr;
+	num_verbose = sechdrs[verboseindex].sh_size /
+				sizeof(struct mod_debug);
+	end = pos + (num_verbose * sizeof(struct mod_debug));
+
+	for (; pos < end; pos += sizeof(struct mod_debug)) {
+		debug_info = (struct mod_debug *)pos;
+		register_dynamic_debug_module(debug_info->modname,
+			debug_info->type, debug_info->logical_modname,
+			debug_info->flag_names, debug_info->hash,
+			debug_info->hash2);
+	}
+}
+#else
+static inline void dynamic_printk_setup(Elf_Shdr *sechdrs,
+					 unsigned int verboseindex)
+{
+}
+#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
+
 static void *module_alloc_update_bounds(unsigned long size)
 {
 	void *ret = module_alloc(size);
@@ -1831,6 +1859,7 @@ static noinline struct module *load_module(void __user *umod,
 #endif
 	unsigned int markersindex;
 	unsigned int markersstringsindex;
+	unsigned int verboseindex;
 	struct module *mod;
 	long err = 0;
 	void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
@@ -2117,6 +2146,7 @@ static noinline struct module *load_module(void __user *umod,
 	markersindex = find_sec(hdr, sechdrs, secstrings, "__markers");
 	markersstringsindex = find_sec(hdr, sechdrs, secstrings,
 					"__markers_strings");
+	verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose");
 
 	/* Now do relocations. */
 	for (i = 1; i < hdr->e_shnum; i++) {
@@ -2167,6 +2197,7 @@ static noinline struct module *load_module(void __user *umod,
 	marker_update_probe_range(mod->markers,
 		mod->markers + mod->num_markers);
 #endif
+	dynamic_printk_setup(sechdrs, verboseindex);
 	err = module_finalize(hdr, sechdrs, mod);
 	if (err < 0)
 		goto cleanup;
@@ -2552,9 +2583,9 @@ static char *module_flags(struct module *mod, char *buf)
 	    mod->state == MODULE_STATE_GOING ||
 	    mod->state == MODULE_STATE_COMING) {
 		buf[bx++] = '(';
-		if (mod->taints & TAINT_PROPRIETARY_MODULE)
+		if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
 			buf[bx++] = 'P';
-		if (mod->taints & TAINT_FORCED_MODULE)
+		if (mod->taints & (1 << TAINT_FORCED_MODULE))
 			buf[bx++] = 'F';
 		/*
 		 * TAINT_FORCED_RMMOD: could be added.
diff --git a/kernel/panic.c b/kernel/panic.c
index 12c5a0a6c89b..f290e8e866f6 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -23,7 +23,7 @@
 #include <linux/kallsyms.h>
 
 int panic_on_oops;
-int tainted;
+static unsigned long tainted_mask;
 static int pause_on_oops;
 static int pause_on_oops_flag;
 static DEFINE_SPINLOCK(pause_on_oops_lock);
@@ -143,6 +143,26 @@ NORET_TYPE void panic(const char * fmt, ...)
 
 EXPORT_SYMBOL(panic);
 
+
+struct tnt {
+	u8 bit;
+	char true;
+	char false;
+};
+
+static const struct tnt tnts[] = {
+	{ TAINT_PROPRIETARY_MODULE, 'P', 'G' },
+	{ TAINT_FORCED_MODULE, 'F', ' ' },
+	{ TAINT_UNSAFE_SMP, 'S', ' ' },
+	{ TAINT_FORCED_RMMOD, 'R', ' ' },
+	{ TAINT_MACHINE_CHECK, 'M', ' ' },
+	{ TAINT_BAD_PAGE, 'B', ' ' },
+	{ TAINT_USER, 'U', ' ' },
+	{ TAINT_DIE, 'D', ' ' },
+	{ TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
+	{ TAINT_WARN, 'W', ' ' },
+};
+
 /**
  * print_tainted - return a string to represent the kernel taint state.
  *
@@ -158,32 +178,41 @@ EXPORT_SYMBOL(panic);
  *
  * The string is overwritten by the next call to print_taint().
  */
-
 const char *print_tainted(void)
 {
-	static char buf[20];
-	if (tainted) {
-		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c",
-				tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
-				tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
-				tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
-				tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
-				tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
-				tainted & TAINT_BAD_PAGE ? 'B' : ' ',
-				tainted & TAINT_USER ? 'U' : ' ',
-				tainted & TAINT_DIE ? 'D' : ' ',
-				tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ',
-				tainted & TAINT_WARN ? 'W' : ' ');
-	}
-	else
+	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];
+
+	if (tainted_mask) {
+		char *s;
+		int i;
+
+		s = buf + sprintf(buf, "Tainted: ");
+		for (i = 0; i < ARRAY_SIZE(tnts); i++) {
+			const struct tnt *t = &tnts[i];
+			*s++ = test_bit(t->bit, &tainted_mask) ?
+					t->true : t->false;
+		}
+		*s = 0;
+	} else
 		snprintf(buf, sizeof(buf), "Not tainted");
 	return(buf);
 }
 
+int test_taint(unsigned flag)
+{
+	return test_bit(flag, &tainted_mask);
+}
+EXPORT_SYMBOL(test_taint);
+
+unsigned long get_taint(void)
+{
+	return tainted_mask;
+}
+
 void add_taint(unsigned flag)
 {
 	debug_locks = 0; /* can't trust the integrity of the kernel anymore */
-	tainted |= flag;
+	set_bit(flag, &tainted_mask);
 }
 EXPORT_SYMBOL(add_taint);
 
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index bbd85c60f741..331f9836383f 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -14,6 +14,7 @@
 #include <linux/reboot.h>
 #include <linux/string.h>
 #include <linux/device.h>
+#include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
@@ -520,6 +521,10 @@ int hibernate(void)
 	if (error)
 		goto Exit;
 
+	error = usermodehelper_disable();
+	if (error)
+		goto Exit;
+
 	/* Allocate memory management structures */
 	error = create_basic_memory_bitmaps();
 	if (error)
@@ -558,6 +563,7 @@ int hibernate(void)
 	thaw_processes();
 Finish:
 	free_basic_memory_bitmaps();
+	usermodehelper_enable();
 Exit:
 	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	pm_restore_console();
@@ -634,6 +640,10 @@ static int software_resume(void)
 	if (error)
 		goto Finish;
 
+	error = usermodehelper_disable();
+	if (error)
+		goto Finish;
+
 	error = create_basic_memory_bitmaps();
 	if (error)
 		goto Finish;
@@ -656,6 +666,7 @@ static int software_resume(void)
 	thaw_processes();
 Done:
 	free_basic_memory_bitmaps();
+	usermodehelper_enable();
 Finish:
 	pm_notifier_call_chain(PM_POST_RESTORE);
 	pm_restore_console();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 540b16b68565..19122cf6d827 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
+#include <linux/kmod.h>
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
@@ -237,6 +238,10 @@ static int suspend_prepare(void)
 	if (error)
 		goto Finish;
 
+	error = usermodehelper_disable();
+	if (error)
+		goto Finish;
+
 	if (suspend_freeze_processes()) {
 		error = -EAGAIN;
 		goto Thaw;
@@ -256,6 +261,7 @@ static int suspend_prepare(void)
 
 Thaw:
 	suspend_thaw_processes();
+	usermodehelper_enable();
Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
@@ -376,6 +382,7 @@ int suspend_devices_and_enter(suspend_state_t state)
 static void suspend_finish(void)
 {
 	suspend_thaw_processes();
+	usermodehelper_enable();
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
 }
diff --git a/kernel/power/user.c b/kernel/power/user.c
index a6332a313262..005b93d839ba 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -212,13 +212,20 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 	case SNAPSHOT_FREEZE:
 		if (data->frozen)
 			break;
+
 		printk("Syncing filesystems ... ");
 		sys_sync();
 		printk("done.\n");
 
-		error = freeze_processes();
+		error = usermodehelper_disable();
 		if (error)
+			break;
+
+		error = freeze_processes();
+		if (error) {
 			thaw_processes();
+			usermodehelper_enable();
+		}
 		if (!error)
 			data->frozen = 1;
 		break;
@@ -227,6 +234,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		if (!data->frozen || data->ready)
 			break;
 		thaw_processes();
+		usermodehelper_enable();
 		data->frozen = 0;
 		break;
 
diff --git a/kernel/printk.c b/kernel/printk.c
index b51b1567bb55..6341af77eb65 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -13,7 +13,7 @@
 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
 *     manfred@colorfullife.com
 * Rewrote bits to get rid of console_lock
- *	01Mar01 Andrew Morton <andrewm@uow.edu.au>
+ *	01Mar01 Andrew Morton
 */
 
 #include <linux/kernel.h>
@@ -577,9 +577,6 @@ static int have_callable_console(void)
 * @fmt: format string
 *
 * This is printk(). It can be called from any context. We want it to work.
- * Be aware of the fact that if oops_in_progress is not set, we might try to
- * wake klogd up which could deadlock on runqueue lock if printk() is called
- * from scheduler code.
 *
 * We try to grab the console_sem. If we succeed, it's easy - we log the output and
 * call the console drivers. If we fail to get the semaphore we place the output
@@ -593,6 +590,8 @@ static int have_callable_console(void)
 *
 * See also:
 * printf(3)
+ *
+ * See the vsnprintf() documentation for format string extensions over C99.
 */
 
 asmlinkage int printk(const char *fmt, ...)
@@ -982,10 +981,25 @@ int is_console_locked(void)
 	return console_locked;
 }
 
-void wake_up_klogd(void)
+static DEFINE_PER_CPU(int, printk_pending);
+
+void printk_tick(void)
 {
-	if (!oops_in_progress && waitqueue_active(&log_wait))
+	if (__get_cpu_var(printk_pending)) {
+		__get_cpu_var(printk_pending) = 0;
 		wake_up_interruptible(&log_wait);
+	}
+}
+
+int printk_needs_cpu(int cpu)
+{
+	return per_cpu(printk_pending, cpu);
+}
+
+void wake_up_klogd(void)
+{
+	if (waitqueue_active(&log_wait))
+		__raw_get_cpu_var(printk_pending) = 1;
 }
 
 /**
@@ -1291,22 +1305,6 @@ static int __init disable_boot_consoles(void)
 }
 late_initcall(disable_boot_consoles);
 
-/**
- * tty_write_message - write a message to a certain tty, not just the console.
- * @tty: the destination tty_struct
- * @msg: the message to write
- *
- * This is used for messages that need to be redirected to a specific tty.
- * We don't put it into the syslog queue right now maybe in the future if
- * really needed.
- */
-void tty_write_message(struct tty_struct *tty, char *msg)
-{
-	if (tty && tty->ops->write)
-		tty->ops->write(tty, msg, strlen(msg));
-	return;
-}
-
 #if defined CONFIG_PRINTK
 
 /*
diff --git a/kernel/profile.c b/kernel/profile.c
index cd26bed4cc26..a9e422df6bf6 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -22,6 +22,8 @@
 #include <linux/cpu.h>
 #include <linux/highmem.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <asm/sections.h>
 #include <asm/irq_regs.h>
 #include <asm/ptrace.h>
@@ -50,11 +52,11 @@ static DEFINE_PER_CPU(int, cpu_profile_flip);
 static DEFINE_MUTEX(profile_flip_mutex);
 #endif /* CONFIG_SMP */
 
-static int __init profile_setup(char *str)
+int profile_setup(char *str)
 {
-	static char __initdata schedstr[] = "schedule";
-	static char __initdata sleepstr[] = "sleep";
-	static char __initdata kvmstr[] = "kvm";
+	static char schedstr[] = "schedule";
+	static char sleepstr[] = "sleep";
+	static char kvmstr[] = "kvm";
 	int par;
 
 	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
@@ -100,14 +102,33 @@ static int __init profile_setup(char *str)
 __setup("profile=", profile_setup);
 
 
-void __init profile_init(void)
+int profile_init(void)
 {
+	int buffer_bytes;
 	if (!prof_on)
-		return;
+		return 0;
 
 	/* only text is profiled */
 	prof_len = (_etext - _stext) >> prof_shift;
-	prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t));
+	buffer_bytes = prof_len*sizeof(atomic_t);
+	if (!slab_is_available()) {
+		prof_buffer = alloc_bootmem(buffer_bytes);
+		return 0;
+	}
+
+	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
+	if (prof_buffer)
+		return 0;
+
+	prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
+	if (prof_buffer)
+		return 0;
+
+	prof_buffer = vmalloc(buffer_bytes);
+	if (prof_buffer)
+		return 0;
+
+	return -ENOMEM;
 }
 
 /* Profile event notifications */
@@ -527,7 +548,7 @@ static void __init profile_nop(void *unused)
 {
 }
 
-static int __init create_hash_tables(void)
+static int create_hash_tables(void)
 {
 	int cpu;
 
@@ -575,14 +596,14 @@ out_cleanup:
 #define create_hash_tables()			({ 0; })
 #endif
 
-static int __init create_proc_profile(void)
+int create_proc_profile(void)
 {
 	struct proc_dir_entry *entry;
 
 	if (!prof_on)
 		return 0;
 	if (create_hash_tables())
-		return -1;
+		return -ENOMEM;
 	entry = proc_create("profile", S_IWUSR | S_IRUGO,
 			    NULL, &proc_profile_operations);
 	if (!entry)
diff --git a/kernel/resource.c b/kernel/resource.c
index 03d796c1b2e9..4089d12af6e0 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,10 +38,6 @@ EXPORT_SYMBOL(iomem_resource);
 
 static DEFINE_RWLOCK(resource_lock);
 
-#ifdef CONFIG_PROC_FS
-
-enum { MAX_IORES_LEVEL = 5 };
-
 static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 {
 	struct resource *p = v;
@@ -53,6 +49,10 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
 	return p->sibling;
 }
 
+#ifdef CONFIG_PROC_FS
+
+enum { MAX_IORES_LEVEL = 5 };
+
 static void *r_start(struct seq_file *m, loff_t *pos)
 	__acquires(resource_lock)
 {
@@ -516,6 +516,70 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t
 	return result;
 }
 
+static void __init __reserve_region_with_split(struct resource *root,
+		resource_size_t start, resource_size_t end,
+		const char *name)
+{
+	struct resource *parent = root;
+	struct resource *conflict;
+	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
+
+	if (!res)
+		return;
+
+	res->name = name;
+	res->start = start;
+	res->end = end;
+	res->flags = IORESOURCE_BUSY;
+
+	for (;;) {
+		conflict = __request_resource(parent, res);
+		if (!conflict)
+			break;
+		if (conflict != parent) {
+			parent = conflict;
+			if (!(conflict->flags & IORESOURCE_BUSY))
+				continue;
+		}
+
+		/* Uhhuh, that didn't work out.. */
+		kfree(res);
+		res = NULL;
+		break;
+	}
+
+	if (!res) {
+		/* failed, split and try again */
+
+		/* conflict covered whole area */
+		if (conflict->start <= start && conflict->end >= end)
+			return;
+
+		if (conflict->start > start)
+			__reserve_region_with_split(root, start, conflict->start-1, name);
+		if (!(conflict->flags & IORESOURCE_BUSY)) {
+			resource_size_t common_start, common_end;
+
+			common_start = max(conflict->start, start);
+			common_end = min(conflict->end, end);
+			if (common_start < common_end)
+				__reserve_region_with_split(root, common_start, common_end, name);
+		}
+		if (conflict->end < end)
+			__reserve_region_with_split(root, conflict->end+1, end, name);
+	}
+
+}
+
+void reserve_region_with_split(struct resource *root,
+		resource_size_t start, resource_size_t end,
+		const char *name)
+{
+	write_lock(&resource_lock);
+	__reserve_region_with_split(root, start, end, name);
+	write_unlock(&resource_lock);
+}
+
 EXPORT_SYMBOL(adjust_resource);
 
 /**
@@ -562,33 +626,34 @@ struct resource * __request_region(struct resource *parent,
 {
 	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
 
-	if (res) {
-		res->name = name;
-		res->start = start;
-		res->end = start + n - 1;
-		res->flags = IORESOURCE_BUSY;
-
-		write_lock(&resource_lock);
-
-		for (;;) {
-			struct resource *conflict;
-
-			conflict = __request_resource(parent, res);
-			if (!conflict)
-				break;
-			if (conflict != parent) {
-				parent = conflict;
-				if (!(conflict->flags & IORESOURCE_BUSY))
-					continue;
-			}
-
-			/* Uhhuh, that didn't work out.. */
-			kfree(res);
-			res = NULL;
-			break;
-		}
-		write_unlock(&resource_lock);
-	}
+	if (!res)
+		return NULL;
+
+	res->name = name;
+	res->start = start;
+	res->end = start + n - 1;
+	res->flags = IORESOURCE_BUSY;
+
+	write_lock(&resource_lock);
+
+	for (;;) {
+		struct resource *conflict;
+
+		conflict = __request_resource(parent, res);
+		if (!conflict)
+			break;
+		if (conflict != parent) {
+			parent = conflict;
+			if (!(conflict->flags & IORESOURCE_BUSY))
+				continue;
+		}
+
+		/* Uhhuh, that didn't work out.. */
+		kfree(res);
+		res = NULL;
+		break;
+	}
+	write_unlock(&resource_lock);
 	return res;
 }
 EXPORT_SYMBOL(__request_region);
@@ -763,3 +828,40 @@ static int __init reserve_setup(char *str)
 }
 
 __setup("reserve=", reserve_setup);
+
+/*
+ * Check if the requested addr and size spans more than any slot in the
+ * iomem resource tree.
+ */
+int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
+{
+	struct resource *p = &iomem_resource;
+	int err = 0;
+	loff_t l;
+
+	read_lock(&resource_lock);
+	for (p = p->child; p ; p = r_next(NULL, p, &l)) {
+		/*
+		 * We can probably skip the resources without
+		 * IORESOURCE_IO attribute?
+		 */
+		if (p->start >= addr + size)
+			continue;
+		if (p->end < addr)
+			continue;
+		if (p->start <= addr && (p->end >= addr + size - 1))
+			continue;
+		printk(KERN_WARNING "resource map sanity check conflict: "
+		       "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
+		       (unsigned long long)addr,
+		       (unsigned long long)(addr + size - 1),
+		       (unsigned long long)p->start,
+		       (unsigned long long)p->end,
+		       p->name);
+		err = -1;
+		break;
+	}
+	read_unlock(&resource_lock);
+
+	return err;
+}
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index bbe6b31c3c56..ad958c1ec708 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -333,12 +333,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 	unsigned long flags;
 	int num_threads = 1;
 
-	rcu_read_lock();
 	if (lock_task_sighand(p, &flags)) {
 		num_threads = atomic_read(&p->signal->count);
 		unlock_task_sighand(p, &flags);
 	}
-	rcu_read_unlock();
 
 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
 	SEQ_printf(m,
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c506f266a6b9..37d67aa2d56f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -46,7 +46,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
 EXPORT_SYMBOL(irq_stat);
 #endif
 
-static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;
+static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
 
@@ -205,7 +205,18 @@ restart:
 
 	do {
 		if (pending & 1) {
+			int prev_count = preempt_count();
+
 			h->action(h);
+
+			if (unlikely(prev_count != preempt_count())) {
+				printk(KERN_ERR "huh, entered softirq %td %p"
+				       "with preempt_count %08x,"
+				       " exited with %08x?\n", h - softirq_vec,
+				       h->action, prev_count, preempt_count());
+				preempt_count() = prev_count;
+			}
+
 			rcu_bh_qsctr_inc(cpu);
 		}
 		h++;
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index cb838ee93a82..3953e4aed733 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -226,7 +226,7 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
 	 * If the system crashed already then all bets are off,
 	 * do not report extra hung tasks:
 	 */
-	if ((tainted & TAINT_DIE) || did_panic)
+	if (test_taint(TAINT_DIE) || did_panic)
 		return;
 
 	read_lock(&tasklist_lock);
diff --git a/kernel/sys.c b/kernel/sys.c
index 038a7bc0901d..0bc8fa3c2288 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1060,9 +1060,7 @@ asmlinkage long sys_setsid(void)
 	group_leader->signal->leader = 1;
 	__set_special_pids(sid);
 
-	spin_lock(&group_leader->sighand->siglock);
-	group_leader->signal->tty = NULL;
-	spin_unlock(&group_leader->sighand->siglock);
+	proc_clear_tty(group_leader);
 
 	err = session;
 out:
@@ -1351,8 +1349,10 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		memcpy(utsname()->nodename, tmp, len);
-		utsname()->nodename[len] = 0;
+		struct new_utsname *u = utsname();
+
+		memcpy(u->nodename, tmp, len);
+		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
 		errno = 0;
 	}
 	up_write(&uts_sem);
@@ -1364,15 +1364,17 @@ asmlinkage long sys_sethostname(char __user *name, int len)
 asmlinkage long sys_gethostname(char __user *name, int len)
 {
 	int i, errno;
+	struct new_utsname *u;
 
 	if (len < 0)
 		return -EINVAL;
 	down_read(&uts_sem);
-	i = 1 + strlen(utsname()->nodename);
+	u = utsname();
+	i = 1 + strlen(u->nodename);
 	if (i > len)
 		i = len;
 	errno = 0;
-	if (copy_to_user(name, utsname()->nodename, i))
+	if (copy_to_user(name, u->nodename, i))
 		errno = -EFAULT;
 	up_read(&uts_sem);
 	return errno;
@@ -1397,8 +1399,10 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
 	down_write(&uts_sem);
 	errno = -EFAULT;
 	if (!copy_from_user(tmp, name, len)) {
-		memcpy(utsname()->domainname, tmp, len);
-		utsname()->domainname[len] = 0;
+		struct new_utsname *u = utsname();
+
+		memcpy(u->domainname, tmp, len);
+		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
 		errno = 0;
 	}
 	up_write(&uts_sem);
@@ -1452,14 +1456,22 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
 		return -EINVAL;
 	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
 		return -EFAULT;
-	if (new_rlim.rlim_cur > new_rlim.rlim_max)
-		return -EINVAL;
 	old_rlim = current->signal->rlim + resource;
 	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
 	    !capable(CAP_SYS_RESOURCE))
 		return -EPERM;
-	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
-		return -EPERM;
+
+	if (resource == RLIMIT_NOFILE) {
+		if (new_rlim.rlim_max == RLIM_INFINITY)
+			new_rlim.rlim_max = sysctl_nr_open;
+		if (new_rlim.rlim_cur == RLIM_INFINITY)
+			new_rlim.rlim_cur = sysctl_nr_open;
+		if (new_rlim.rlim_max > sysctl_nr_open)
+			return -EPERM;
+	}
+
+	if (new_rlim.rlim_cur > new_rlim.rlim_max)
+		return -EINVAL;
 
 	retval = security_task_setrlimit(resource, &new_rlim);
 	if (retval)
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 08d6e1bb99ac..a77b27b11b04 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -125,6 +125,12 @@ cond_syscall(sys_vm86old);
 cond_syscall(sys_vm86);
 cond_syscall(compat_sys_ipc);
 cond_syscall(compat_sys_sysctl);
+cond_syscall(sys_flock);
+cond_syscall(sys_io_setup);
+cond_syscall(sys_io_destroy);
+cond_syscall(sys_io_submit);
+cond_syscall(sys_io_cancel);
+cond_syscall(sys_io_getevents);
 
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 50ec0886fa3d..617d41e4d6a0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -80,7 +80,6 @@ extern int pid_max_min, pid_max_max;
80extern int sysctl_drop_caches; 80extern int sysctl_drop_caches;
81extern int percpu_pagelist_fraction; 81extern int percpu_pagelist_fraction;
82extern int compat_log; 82extern int compat_log;
83extern int maps_protect;
84extern int latencytop_enabled; 83extern int latencytop_enabled;
85extern int sysctl_nr_open_min, sysctl_nr_open_max; 84extern int sysctl_nr_open_min, sysctl_nr_open_max;
86#ifdef CONFIG_RCU_TORTURE_TEST 85#ifdef CONFIG_RCU_TORTURE_TEST
@@ -97,7 +96,7 @@ static int sixty = 60;
97static int neg_one = -1; 96static int neg_one = -1;
98#endif 97#endif
99 98
100#ifdef CONFIG_MMU 99#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
101static int two = 2; 100static int two = 2;
102#endif 101#endif
103 102
@@ -118,10 +117,8 @@ extern char modprobe_path[];
118extern int sg_big_buff; 117extern int sg_big_buff;
119#endif 118#endif
120 119
121#ifdef __sparc__ 120#ifdef CONFIG_SPARC
122extern char reboot_command []; 121#include <asm/system.h>
123extern int stop_a_enabled;
124extern int scons_pwroff;
125#endif 122#endif
126 123
127#ifdef __hppa__ 124#ifdef __hppa__
@@ -152,7 +149,7 @@ extern int max_lock_depth;
152#ifdef CONFIG_PROC_SYSCTL 149#ifdef CONFIG_PROC_SYSCTL
153static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, 150static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
154 void __user *buffer, size_t *lenp, loff_t *ppos); 151 void __user *buffer, size_t *lenp, loff_t *ppos);
155static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp, 152static int proc_taint(struct ctl_table *table, int write, struct file *filp,
156 void __user *buffer, size_t *lenp, loff_t *ppos); 153 void __user *buffer, size_t *lenp, loff_t *ppos);
157#endif 154#endif
158 155
@@ -382,10 +379,9 @@ static struct ctl_table kern_table[] = {
382#ifdef CONFIG_PROC_SYSCTL 379#ifdef CONFIG_PROC_SYSCTL
383 { 380 {
384 .procname = "tainted", 381 .procname = "tainted",
385 .data = &tainted, 382 .maxlen = sizeof(long),
386 .maxlen = sizeof(int),
387 .mode = 0644, 383 .mode = 0644,
388 .proc_handler = &proc_dointvec_taint, 384 .proc_handler = &proc_taint,
389 }, 385 },
390#endif 386#endif
391#ifdef CONFIG_LATENCYTOP 387#ifdef CONFIG_LATENCYTOP
@@ -415,7 +411,7 @@ static struct ctl_table kern_table[] = {
415 .mode = 0644, 411 .mode = 0644,
416 .proc_handler = &proc_dointvec, 412 .proc_handler = &proc_dointvec,
417 }, 413 },
418#ifdef __sparc__ 414#ifdef CONFIG_SPARC
419 { 415 {
420 .ctl_name = KERN_SPARC_REBOOT, 416 .ctl_name = KERN_SPARC_REBOOT,
421 .procname = "reboot-cmd", 417 .procname = "reboot-cmd",
@@ -810,16 +806,6 @@ static struct ctl_table kern_table[] = {
810 .proc_handler = &proc_dointvec, 806 .proc_handler = &proc_dointvec,
811 }, 807 },
812#endif 808#endif
813#ifdef CONFIG_PROC_FS
814 {
815 .ctl_name = CTL_UNNUMBERED,
816 .procname = "maps_protect",
817 .data = &maps_protect,
818 .maxlen = sizeof(int),
819 .mode = 0644,
820 .proc_handler = &proc_dointvec,
821 },
822#endif
823 { 809 {
824 .ctl_name = CTL_UNNUMBERED, 810 .ctl_name = CTL_UNNUMBERED,
825 .procname = "poweroff_cmd", 811 .procname = "poweroff_cmd",
@@ -1261,6 +1247,7 @@ static struct ctl_table fs_table[] = {
 		.extra1		= &minolduid,
 		.extra2		= &maxolduid,
 	},
+#ifdef CONFIG_FILE_LOCKING
 	{
 		.ctl_name	= FS_LEASES,
 		.procname	= "leases-enable",
@@ -1269,6 +1256,7 @@ static struct ctl_table fs_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+#endif
 #ifdef CONFIG_DNOTIFY
 	{
 		.ctl_name	= FS_DIR_NOTIFY,
@@ -1280,6 +1268,7 @@ static struct ctl_table fs_table[] = {
 	},
 #endif
 #ifdef CONFIG_MMU
+#ifdef CONFIG_FILE_LOCKING
 	{
 		.ctl_name	= FS_LEASE_TIME,
 		.procname	= "lease-break-time",
@@ -1291,6 +1280,8 @@ static struct ctl_table fs_table[] = {
 		.extra1		= &zero,
 		.extra2		= &two,
 	},
+#endif
+#ifdef CONFIG_AIO
 	{
 		.procname	= "aio-nr",
 		.data		= &aio_nr,
@@ -1305,6 +1296,7 @@ static struct ctl_table fs_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_doulongvec_minmax,
 	},
+#endif /* CONFIG_AIO */
 #ifdef CONFIG_INOTIFY_USER
 	{
 		.ctl_name	= FS_INOTIFY,
@@ -1510,7 +1502,6 @@ void register_sysctl_root(struct ctl_table_root *root)
 /* Perform the actual read/write of a sysctl table entry. */
 static int do_sysctl_strategy(struct ctl_table_root *root,
 			struct ctl_table *table,
-			int __user *name, int nlen,
 			void __user *oldval, size_t __user *oldlenp,
 			void __user *newval, size_t newlen)
 {
@@ -1524,8 +1515,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
 		return -EPERM;
 
 	if (table->strategy) {
-		rc = table->strategy(table, name, nlen, oldval, oldlenp,
-				     newval, newlen);
+		rc = table->strategy(table, oldval, oldlenp, newval, newlen);
 		if (rc < 0)
 			return rc;
 		if (rc > 0)
@@ -1535,8 +1525,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
 	/* If there is no strategy routine, or if the strategy returns
 	 * zero, proceed with automatic r/w */
 	if (table->data && table->maxlen) {
-		rc = sysctl_data(table, name, nlen, oldval, oldlenp,
-				 newval, newlen);
+		rc = sysctl_data(table, oldval, oldlenp, newval, newlen);
 		if (rc < 0)
 			return rc;
 	}
@@ -1568,7 +1557,7 @@ repeat:
 		table = table->child;
 		goto repeat;
 	}
-	error = do_sysctl_strategy(root, table, name, nlen,
+	error = do_sysctl_strategy(root, table,
 				   oldval, oldlenp,
 				   newval, newlen);
 	return error;
@@ -2237,49 +2226,39 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
 			    NULL,NULL);
 }
 
-#define OP_SET	0
-#define OP_AND	1
-#define OP_OR	2
-
-static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
-				      int *valp,
-				      int write, void *data)
-{
-	int op = *(int *)data;
-	if (write) {
-		int val = *negp ? -*lvalp : *lvalp;
-		switch(op) {
-		case OP_SET:	*valp = val; break;
-		case OP_AND:	*valp &= val; break;
-		case OP_OR:	*valp |= val; break;
-		}
-	} else {
-		int val = *valp;
-		if (val < 0) {
-			*negp = -1;
-			*lvalp = (unsigned long)-val;
-		} else {
-			*negp = 0;
-			*lvalp = (unsigned long)val;
-		}
-	}
-	return 0;
-}
-
 /*
  * Taint values can only be increased
+ * This means we can safely use a temporary.
  */
-static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
+static int proc_taint(struct ctl_table *table, int write, struct file *filp,
 			       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int op;
+	struct ctl_table t;
+	unsigned long tmptaint = get_taint();
+	int err;
 
 	if (write && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
-	op = OP_OR;
-	return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
-				do_proc_dointvec_bset_conv,&op);
+	t = *table;
+	t.data = &tmptaint;
+	err = proc_doulongvec_minmax(&t, write, filp, buffer, lenp, ppos);
+	if (err < 0)
+		return err;
+
+	if (write) {
+		/*
+		 * Poor man's atomic or. Not worth adding a primitive
+		 * to everyone's atomic.h for this
+		 */
+		int i;
+		for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) {
+			if ((tmptaint >> i) & 1)
+				add_taint(i);
+		}
+	}
+
+	return err;
 }
 
 struct do_proc_dointvec_minmax_conv_param {
@@ -2727,7 +2706,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
  */
 
 /* The generic sysctl data routine (used if no strategy routine supplied) */
-int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_data(struct ctl_table *table,
 		void __user *oldval, size_t __user *oldlenp,
 		void __user *newval, size_t newlen)
 {
@@ -2761,7 +2740,7 @@ int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
 }
 
 /* The generic string strategy routine: */
-int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table,
 		  void __user *oldval, size_t __user *oldlenp,
 		  void __user *newval, size_t newlen)
 {
@@ -2807,7 +2786,7 @@ int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
  * are between the minimum and maximum values given in the arrays
  * table->extra1 and table->extra2, respectively.
  */
-int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table,
 		void __user *oldval, size_t __user *oldlenp,
 		void __user *newval, size_t newlen)
 {
@@ -2843,7 +2822,7 @@ int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
 }
 
 /* Strategy function to convert jiffies to seconds */
-int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table,
 		void __user *oldval, size_t __user *oldlenp,
 		void __user *newval, size_t newlen)
 {
@@ -2877,7 +2856,7 @@ int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
 }
 
 /* Strategy function to convert jiffies to seconds */
-int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table,
 		void __user *oldval, size_t __user *oldlenp,
 		void __user *newval, size_t newlen)
 {
@@ -2932,35 +2911,35 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
 	return error;
 }
 
-int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_data(struct ctl_table *table,
 		  void __user *oldval, size_t __user *oldlenp,
 		  void __user *newval, size_t newlen)
 {
 	return -ENOSYS;
 }
 
-int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_string(struct ctl_table *table,
 		  void __user *oldval, size_t __user *oldlenp,
 		  void __user *newval, size_t newlen)
 {
 	return -ENOSYS;
 }
 
-int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_intvec(struct ctl_table *table,
 		void __user *oldval, size_t __user *oldlenp,
 		void __user *newval, size_t newlen)
 {
 	return -ENOSYS;
 }
 
-int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_jiffies(struct ctl_table *table,
 		void __user *oldval, size_t __user *oldlenp,
 		void __user *newval, size_t newlen)
 {
 	return -ENOSYS;
 }
 
-int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
+int sysctl_ms_jiffies(struct ctl_table *table,
 		void __user *oldval, size_t __user *oldlenp,
 		void __user *newval, size_t newlen)
 {
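The new proc_taint() above reads the written value through a temporary unsigned long and then applies it one set bit at a time, so taint flags can only ever be added, never cleared. The following userspace sketch illustrates only that bit-walking idea; apply_flag() and the flags variable are hypothetical stand-ins for add_taint() and the kernel's tainted mask, not part of this patch.

/* Minimal sketch of the bit-walk behind proc_taint(); apply_flag() is a
 * stand-in for add_taint(), flags stands in for the kernel taint mask. */
#include <limits.h>
#include <stdio.h>

static unsigned long flags;

static void apply_flag(int bit)			/* stand-in for add_taint(bit) */
{
	flags |= 1UL << bit;
}

static void apply_taint_word(unsigned long tmptaint)
{
	int i;

	/* stop early once no higher bits remain; bits can only be set */
	for (i = 0; i < (int)(sizeof(unsigned long) * CHAR_BIT) && tmptaint >> i; i++)
		if ((tmptaint >> i) & 1)
			apply_flag(i);
}

int main(void)
{
	apply_taint_word(0x5);		/* request bits 0 and 2 */
	apply_taint_word(0x2);		/* a later write can only add bit 1 */
	printf("flags = %#lx\n", flags);	/* prints 0x7 */
	return 0;
}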
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8d53106a0a92..95ed42951e0a 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -3,7 +3,6 @@
 #
 config TICK_ONESHOT
 	bool
-	default n
 
 config NO_HZ
 	bool "Tickless System (Dynamic Ticks)"
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb02324bdb88..b711ffcb106c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -190,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-	*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	if (!tick_nohz_enabled)
+		return -1;
+
+	if (ts->idle_active)
+		*last_update_time = ktime_to_us(ts->idle_lastupdate);
+	else
+		*last_update_time = ktime_to_us(ktime_get());
+
 	return ktime_to_us(ts->idle_sleeptime);
 }
+EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
@@ -261,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	next_jiffies = get_next_timer_interrupt(last_jiffies);
 	delta_jiffies = next_jiffies - last_jiffies;
 
-	if (rcu_needs_cpu(cpu))
+	if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
 		delta_jiffies = 1;
 	/*
 	 * Do not stop the tick, if we are only one off
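Since get_cpu_idle_time_us() is now exported and returns -1 when dynamic ticks are disabled, any modular caller has to treat that value as "no NO_HZ idle accounting available". A hedged sketch of such a caller follows; sample_idle_us() and its zero fallback are illustrative assumptions, not part of this patch.

/* Sketch of a caller handling the new -1 return (nohz disabled). */
#include <linux/tick.h>

static u64 sample_idle_us(int cpu, u64 *wall_us)
{
	u64 idle_us = get_cpu_idle_time_us(cpu, wall_us);

	if (idle_us == (u64)-1)
		idle_us = 0;	/* nohz off: derive idle time some other way */

	return idle_us;
}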
diff --git a/kernel/timer.c b/kernel/timer.c
index 03bc7f1f1593..510fe69351ca 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -978,6 +978,7 @@ void update_process_times(int user_tick)
 	run_local_timers();
 	if (rcu_pending(cpu))
 		rcu_check_callbacks(cpu, user_tick);
+	printk_tick();
 	scheduler_tick();
 	run_posix_cpu_timers(p);
 }
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 4ab9659d269e..3b34b3545936 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -60,7 +60,7 @@ static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,
 
 #ifdef CONFIG_SYSCTL_SYSCALL
 /* The generic string strategy routine: */
-static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
+static int sysctl_uts_string(ctl_table *table,
 		  void __user *oldval, size_t __user *oldlenp,
 		  void __user *newval, size_t newlen)
 {
@@ -69,8 +69,7 @@ static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
 	write = newval && newlen;
 	memcpy(&uts_table, table, sizeof(uts_table));
 	uts_table.data = get_uts(table, write);
-	r = sysctl_string(&uts_table, name, nlen,
-			oldval, oldlenp, newval, newlen);
+	r = sysctl_string(&uts_table, oldval, oldlenp, newval, newlen);
 	put_uts(table, write, uts_table.data);
 	return r;
 }
diff --git a/kernel/wait.c b/kernel/wait.c
index c275c56cf2d3..cd87131f2fc2 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -72,12 +72,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue(q, wait);
-	/*
-	 * don't alter the task state if this is just going to
-	 * queue an async wait queue callback
-	 */
-	if (is_sync_wait(wait))
-		set_current_state(state);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait);
@@ -91,12 +86,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
 	spin_lock_irqsave(&q->lock, flags);
 	if (list_empty(&wait->task_list))
 		__add_wait_queue_tail(q, wait);
-	/*
-	 * don't alter the task state if this is just going to
-	 * queue an async wait queue callback
-	 */
-	if (is_sync_wait(wait))
-		set_current_state(state);
+	set_current_state(state);
 	spin_unlock_irqrestore(&q->lock, flags);
 }
 EXPORT_SYMBOL(prepare_to_wait_exclusive);
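With the is_sync_wait() check gone, prepare_to_wait() and prepare_to_wait_exclusive() set the task state unconditionally, which assumes every caller follows the usual synchronous sleep loop. A minimal sketch of that loop, assuming a hypothetical wait queue my_wq and condition flag:

/* Canonical wait-loop idiom served by prepare_to_wait()/finish_wait();
 * my_wq and *condition are placeholders for a real subsystem's state. */
#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);

static void wait_for_condition(int *condition)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* queues us and sets TASK_INTERRUPTIBLE under q->lock */
		prepare_to_wait(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (*condition)		/* re-check after the state change */
			break;
		schedule();		/* actually sleep until woken */
		/* a real caller would also break out on signal_pending() */
	}
	finish_wait(&my_wq, &wait);	/* restore TASK_RUNNING and dequeue */
}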
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4048e92aa04f..714afad46539 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -9,7 +9,7 @@
  * Derived from the taskqueue/keventd code by:
  *
  *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton <andrewm@uow.edu.au>
+ *   Andrew Morton
  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
  *   Theodore Ts'o <tytso@mit.edu>
  *