path: root/kernel
author     David S. Miller <davem@davemloft.net>    2009-05-19 00:08:20 -0400
committer  David S. Miller <davem@davemloft.net>    2009-05-19 00:08:20 -0400
commit     bb803cfbecb03a0cf8dc7e1864f18dda6631af00 (patch)
tree       6c0989693bea6f50cfa5c6bb14f52ec19668def3 /kernel
parent     3878fb6fdbceecca20b15748f807340854220f06 (diff)
parent     511e11e396dc596825ce04d53d7f6d579404bc01 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
        drivers/scsi/fcoe/fcoe.c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_tree.c          3
-rw-r--r--  kernel/auditfilter.c         4
-rw-r--r--  kernel/cgroup.c              3
-rw-r--r--  kernel/irq/handle.c          2
-rw-r--r--  kernel/irq/manage.c          5
-rw-r--r--  kernel/kgdb.c                4
-rw-r--r--  kernel/kprobes.c            31
-rw-r--r--  kernel/lockdep.c            22
-rw-r--r--  kernel/panic.c              42
-rw-r--r--  kernel/posix-cpu-timers.c    8
-rw-r--r--  kernel/power/disk.c         51
-rw-r--r--  kernel/power/main.c         24
-rw-r--r--  kernel/power/swap.c          2
-rw-r--r--  kernel/ptrace.c             11
-rw-r--r--  kernel/rcupdate.c           18
-rw-r--r--  kernel/rcutree.c            19
-rw-r--r--  kernel/rcutree_trace.c      14
-rw-r--r--  kernel/resource.c           46
-rw-r--r--  kernel/sched.c              12
-rw-r--r--  kernel/slow-work.c           4
-rw-r--r--  kernel/softirq.c             4
-rw-r--r--  kernel/sys.c                24
-rw-r--r--  kernel/sysctl.c             48
-rw-r--r--  kernel/time/clocksource.c    8
-rw-r--r--  kernel/time/jiffies.c        2
-rw-r--r--  kernel/time/tick-common.c   12
-rw-r--r--  kernel/time/timekeeping.c   12
-rw-r--r--  kernel/trace/trace.c         1
-rw-r--r--  kernel/trace/trace_branch.c  8
-rw-r--r--  kernel/trace/trace_power.c   7
30 files changed, 229 insertions(+), 222 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab9525568..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
         dentry = dget(path.dentry);
         path_put(&path);
 
-        if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-                follow_up(&mnt, &dentry);
-
         list_add_tail(&list, &tagged->mnt_list);
 
         mutex_lock(&audit_filter_mutex);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a6fe71fd5d1b..713098ee5a02 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
 
                 if (audit_enabled) {
                         struct audit_buffer *ab;
-                        ab = audit_log_start(NULL, GFP_KERNEL,
+                        ab = audit_log_start(NULL, GFP_NOFS,
                                 AUDIT_CONFIG_CHANGE);
                         audit_log_format(ab, "auid=%u ses=%u",
                                 audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
                 e = container_of(r, struct audit_entry, rule);
                 if (audit_enabled) {
                         struct audit_buffer *ab;
-                        ab = audit_log_start(NULL, GFP_KERNEL,
+                        ab = audit_log_start(NULL, GFP_NOFS,
                                 AUDIT_CONFIG_CHANGE);
                         audit_log_format(ab, "auid=%u ses=%u",
                                 audit_get_loginuid(current),
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 382109b5baeb..a7267bfd3765 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1133,8 +1133,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
 free_cg_links:
         free_cg_links(&tmp_cg_links);
 drop_new_super:
-        up_write(&sb->s_umount);
-        deactivate_super(sb);
+        deactivate_locked_super(sb);
         return ret;
 }
 
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index d82142be8dd2..26e08754744f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -363,8 +363,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
         irqreturn_t ret, retval = IRQ_NONE;
         unsigned int status = 0;
 
-        WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
         if (!(action->flags & IRQF_DISABLED))
                 local_irq_enable_in_hardirq();
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd4cd2f..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
         spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-                cpumask_copy(desc->affinity, cpumask);
+        if (desc->status & IRQ_MOVE_PCNTXT)
                 desc->chip->set_affinity(irq, cpumask);
-        } else {
+        else {
                 desc->status |= IRQ_MOVE_PENDING;
                 cpumask_copy(desc->pending_mask, cpumask);
         }
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index e4dcfb2272a4..9147a3190c9d 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -1583,8 +1583,8 @@ static void sysrq_handle_gdb(int key, struct tty_struct *tty)
 
 static struct sysrq_key_op sysrq_gdb_op = {
         .handler = sysrq_handle_gdb,
-        .help_msg = "Gdb",
-        .action_msg = "GDB",
+        .help_msg = "debug(G)",
+        .action_msg = "DEBUG",
 };
 #endif
 
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a5e74ddee0e2..c0fa54b276d9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -319,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
         return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+        mutex_lock(&text_mutex);
+        arch_arm_kprobe(kp);
+        mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+        mutex_lock(&text_mutex);
+        arch_disarm_kprobe(kp);
+        mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -538,7 +554,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
                 ap->flags &= ~KPROBE_FLAG_DISABLED;
                 if (!kprobes_all_disarmed)
                         /* Arm the breakpoint again. */
-                        arch_arm_kprobe(ap);
+                        arm_kprobe(ap);
         }
         return 0;
 }
@@ -789,11 +805,8 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
                  * enabled and not gone - otherwise, the breakpoint would
                  * already have been removed. We save on flushing icache.
                  */
-                if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
-                        mutex_lock(&text_mutex);
-                        arch_disarm_kprobe(p);
-                        mutex_unlock(&text_mutex);
-                }
+                if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+                        disarm_kprobe(p);
                 hlist_del_rcu(&old_p->hlist);
         } else {
                 if (p->break_handler && !kprobe_gone(p))
@@ -810,7 +823,7 @@ noclean:
                 if (!kprobe_disabled(old_p)) {
                         try_to_disable_aggr_kprobe(old_p);
                         if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-                                arch_disarm_kprobe(old_p);
+                                disarm_kprobe(old_p);
                 }
         }
         return 0;
@@ -1364,7 +1377,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
         try_to_disable_aggr_kprobe(p);
 
         if (!kprobes_all_disarmed && kprobe_disabled(p))
-                arch_disarm_kprobe(p);
+                disarm_kprobe(p);
 out:
         mutex_unlock(&kprobe_mutex);
         return ret;
@@ -1393,7 +1406,7 @@ int __kprobes enable_kprobe(struct kprobe *kp)
         }
 
         if (!kprobes_all_disarmed && kprobe_disabled(p))
-                arch_arm_kprobe(p);
+                arm_kprobe(p);
 
         p->flags &= ~KPROBE_FLAG_DISABLED;
         if (p != kp)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b0f011866969..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
                       struct lock_class_key *key, int subclass)
 {
-        if (unlikely(!debug_locks))
+        lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+        lock->cpu = raw_smp_processor_id();
+#endif
+
+        if (DEBUG_LOCKS_WARN_ON(!name)) {
+                lock->name = "NULL";
                 return;
+        }
+
+        lock->name = name;
 
         if (DEBUG_LOCKS_WARN_ON(!key))
                 return;
-        if (DEBUG_LOCKS_WARN_ON(!name))
-                return;
         /*
          * Sanity check, the lock-class key must be persistent:
          */
@@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
                 DEBUG_LOCKS_WARN_ON(1);
                 return;
         }
-        lock->name = name;
         lock->key = key;
-        lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-        lock->cpu = raw_smp_processor_id();
-#endif
+
+        if (unlikely(!debug_locks))
+                return;
+
         if (subclass)
                 register_lock_class(lock, subclass, 1);
 }
diff --git a/kernel/panic.c b/kernel/panic.c
index 934fb377f4b3..984b3ecbd72c 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -221,7 +221,7 @@ void add_taint(unsigned flag)
          * post-warning case.
          */
         if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-                printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+                printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
 
         set_bit(flag, &tainted_mask);
 }
@@ -340,34 +340,46 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
-{
+struct slowpath_args {
+        const char *fmt;
         va_list args;
-        char function[KSYM_SYMBOL_LEN];
-        unsigned long caller = (unsigned long)__builtin_return_address(0);
-        const char *board;
+};
 
-        sprint_symbol(function, caller);
+static void warn_slowpath_common(const char *file, int line, void *caller, struct slowpath_args *args)
+{
+        const char *board;
 
         printk(KERN_WARNING "------------[ cut here ]------------\n");
-        printk(KERN_WARNING "WARNING: at %s:%d %s()\n", file,
-                line, function);
+        printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
         board = dmi_get_system_info(DMI_PRODUCT_NAME);
         if (board)
                 printk(KERN_WARNING "Hardware name: %s\n", board);
 
-        if (fmt) {
-                va_start(args, fmt);
-                vprintk(fmt, args);
-                va_end(args);
-        }
+        if (args)
+                vprintk(args->fmt, args->args);
 
         print_modules();
         dump_stack();
         print_oops_end_marker();
         add_taint(TAINT_WARN);
 }
-EXPORT_SYMBOL(warn_slowpath);
+
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
+{
+        struct slowpath_args args;
+
+        args.fmt = fmt;
+        va_start(args.args, fmt);
+        warn_slowpath_common(file, line, __builtin_return_address(0), &args);
+        va_end(args.args);
+}
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+        warn_slowpath_common(file, line, __builtin_return_address(0), NULL);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c9dcf98b4463..bece7c0b67b2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
          * timer call will interfere.
          */
         list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-                int firing;
+                int cpu_firing;
+
                 spin_lock(&timer->it_lock);
                 list_del_init(&timer->it.cpu.entry);
-                firing = timer->it.cpu.firing;
+                cpu_firing = timer->it.cpu.firing;
                 timer->it.cpu.firing = 0;
                 /*
                  * The firing flag is -1 if we collided with a reset
                  * of the timer, which already reported this
                  * almost-firing as an overrun. So don't generate an event.
                  */
-                if (likely(firing >= 0)) {
+                if (likely(cpu_firing >= 0))
                         cpu_timer_fire(timer);
-                }
                 spin_unlock(&timer->it_lock);
         }
 }
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0854770b63b9..e71ca9cd81b2 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -646,13 +646,6 @@ static int software_resume(void)
                 return 0;
 
         /*
-         * We can't depend on SCSI devices being available after loading one of
-         * their modules if scsi_complete_async_scans() is not called and the
-         * resume device usually is a SCSI one.
-         */
-        scsi_complete_async_scans();
-
-        /*
          * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
          * is configured into the kernel. Since the regular hibernate
          * trigger path is via sysfs which takes a buffer mutex before
@@ -663,32 +656,42 @@ static int software_resume(void)
          * here to avoid lockdep complaining.
          */
         mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+        if (swsusp_resume_device)
+                goto Check_image;
+
+        if (!strlen(resume_file)) {
+                error = -ENOENT;
+                goto Unlock;
+        }
+
+        pr_debug("PM: Checking image partition %s\n", resume_file);
+
+        /* Check if the device is there */
+        swsusp_resume_device = name_to_dev_t(resume_file);
         if (!swsusp_resume_device) {
-                if (!strlen(resume_file)) {
-                        mutex_unlock(&pm_mutex);
-                        return -ENOENT;
-                }
                 /*
                  * Some device discovery might still be in progress; we need
                  * to wait for this to finish.
                  */
                 wait_for_device_probe();
+                /*
+                 * We can't depend on SCSI devices being available after loading
+                 * one of their modules until scsi_complete_async_scans() is
+                 * called and the resume device usually is a SCSI one.
+                 */
+                scsi_complete_async_scans();
+
                 swsusp_resume_device = name_to_dev_t(resume_file);
-                pr_debug("PM: Resume from partition %s\n", resume_file);
-        } else {
-                pr_debug("PM: Resume from partition %d:%d\n",
-                                MAJOR(swsusp_resume_device),
-                                MINOR(swsusp_resume_device));
+                if (!swsusp_resume_device) {
+                        error = -ENODEV;
+                        goto Unlock;
+                }
         }
 
-        if (noresume) {
-                /**
-                 * FIXME: If noresume is specified, we need to find the
-                 * partition and reset it back to normal swap space.
-                 */
-                mutex_unlock(&pm_mutex);
-                return 0;
-        }
+ Check_image:
+        pr_debug("PM: Resume from partition %d:%d\n",
+                MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
         pr_debug("PM: Checking hibernation image.\n");
         error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f172f41858bb..f99ed6a75eac 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state)
 
         device_pm_lock();
 
+        if (suspend_ops->prepare) {
+                error = suspend_ops->prepare();
+                if (error)
+                        goto Done;
+        }
+
         error = device_power_down(PMSG_SUSPEND);
         if (error) {
                 printk(KERN_ERR "PM: Some devices failed to power down\n");
-                goto Done;
+                goto Platfrom_finish;
         }
 
-        if (suspend_ops->prepare) {
-                error = suspend_ops->prepare();
+        if (suspend_ops->prepare_late) {
+                error = suspend_ops->prepare_late();
                 if (error)
                         goto Power_up_devices;
         }
 
         if (suspend_test(TEST_PLATFORM))
-                goto Platfrom_finish;
+                goto Platform_wake;
 
         error = disable_nonboot_cpus();
         if (error || suspend_test(TEST_CPUS))
@@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state)
  Enable_cpus:
         enable_nonboot_cpus();
 
- Platfrom_finish:
-        if (suspend_ops->finish)
-                suspend_ops->finish();
+ Platform_wake:
+        if (suspend_ops->wake)
+                suspend_ops->wake();
 
  Power_up_devices:
         device_power_up(PMSG_RESUME);
 
+ Platfrom_finish:
+        if (suspend_ops->finish)
+                suspend_ops->finish();
+
  Done:
         device_pm_unlock();
 
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 505f319e489c..8ba052c86d48 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -64,8 +64,6 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
         struct bio *bio;
 
         bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-        if (!bio)
-                return -ENOMEM;
         bio->bi_sector = page_off * (PAGE_SIZE >> 9);
         bio->bi_bdev = resume_bdev;
         bio->bi_end_io = end_swap_bio_read;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 64191fa09b7e..0692ab5a0d67 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
         /* Protect exec's credential calculations against our interference;
          * SUID, SGID and LSM creds get determined differently under ptrace.
          */
-        retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+        retval = mutex_lock_interruptible(&task->cred_exec_mutex);
         if (retval < 0)
                 goto out;
 
@@ -232,7 +232,7 @@ repeat:
 bad:
         write_unlock_irqrestore(&tasklist_lock, flags);
         task_unlock(task);
-        mutex_unlock(&current->cred_exec_mutex);
+        mutex_unlock(&task->cred_exec_mutex);
 out:
         return retval;
 }
@@ -604,10 +604,11 @@ repeat:
         ret = security_ptrace_traceme(current->parent);
 
         /*
-         * Set the ptrace bit in the process ptrace flags.
-         * Then link us on our parent's ptraced list.
+         * Check PF_EXITING to ensure ->real_parent has not passed
+         * exit_ptrace(). Otherwise we don't report the error but
+         * pretend ->real_parent untraces us right after return.
          */
-        if (!ret) {
+        if (!ret && !(current->real_parent->flags & PF_EXITING)) {
                 current->ptrace |= PT_PTRACED;
                 __ptrace_link(current, current->real_parent);
         }
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c7b8457d0d2..a967c9feb90a 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -58,6 +58,10 @@ static DEFINE_MUTEX(rcu_barrier_mutex);
 static struct completion rcu_barrier_completion;
 int rcu_scheduler_active __read_mostly;
 
+static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
+static struct rcu_head rcu_migrate_head[3];
+static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
+
 /*
  * Awaken the corresponding synchronize_rcu() instance now that a
  * grace period has elapsed.
@@ -122,7 +126,10 @@ static void rcu_barrier_func(void *type)
         }
 }
 
-static inline void wait_migrated_callbacks(void);
+static inline void wait_migrated_callbacks(void)
+{
+        wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
+}
 
 /*
  * Orchestrate the specified type of RCU barrier, waiting for all
@@ -179,21 +186,12 @@ void rcu_barrier_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_sched);
 
-static atomic_t rcu_migrate_type_count = ATOMIC_INIT(0);
-static struct rcu_head rcu_migrate_head[3];
-static DECLARE_WAIT_QUEUE_HEAD(rcu_migrate_wq);
-
 static void rcu_migrate_callback(struct rcu_head *notused)
 {
         if (atomic_dec_and_test(&rcu_migrate_type_count))
                 wake_up(&rcu_migrate_wq);
 }
 
-static inline void wait_migrated_callbacks(void)
-{
-        wait_event(rcu_migrate_wq, !atomic_read(&rcu_migrate_type_count));
-}
-
 static int __cpuinit rcu_barrier_cpu_hotplug(struct notifier_block *self,
                 unsigned long action, void *hcpu)
 {
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 7f3266922572..d2a372fb0b9b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -530,8 +530,6 @@ static void note_new_gpnum(struct rcu_state *rsp, struct rcu_data *rdp)
         rdp->qs_pending = 1;
         rdp->passed_quiesc = 0;
         rdp->gpnum = rsp->gpnum;
-        rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-                                      RCU_JIFFIES_TILL_FORCE_QS;
 }
 
 /*
@@ -578,8 +576,6 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
         rsp->gpnum++;
         rsp->signaled = RCU_GP_INIT; /* Hold off force_quiescent_state. */
         rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-        rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-                                      RCU_JIFFIES_TILL_FORCE_QS;
         record_gp_stall_check_time(rsp);
         dyntick_record_completed(rsp, rsp->completed - 1);
         note_new_gpnum(rsp, rdp);
@@ -1055,7 +1051,6 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 {
         unsigned long flags;
         long lastcomp;
-        struct rcu_data *rdp = rsp->rda[smp_processor_id()];
         struct rcu_node *rnp = rcu_get_root(rsp);
         u8 signaled;
 
@@ -1066,16 +1061,13 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
                 return; /* Someone else is already on the job. */
         }
         if (relaxed &&
-            (long)(rsp->jiffies_force_qs - jiffies) >= 0 &&
-            (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) >= 0)
+            (long)(rsp->jiffies_force_qs - jiffies) >= 0)
                 goto unlock_ret; /* no emergency and done recently. */
         rsp->n_force_qs++;
         spin_lock(&rnp->lock);
         lastcomp = rsp->completed;
         signaled = rsp->signaled;
         rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS;
-        rdp->n_rcu_pending_force_qs = rdp->n_rcu_pending +
-                                      RCU_JIFFIES_TILL_FORCE_QS;
         if (lastcomp == rsp->gpnum) {
                 rsp->n_force_qs_ngp++;
                 spin_unlock(&rnp->lock);
@@ -1144,8 +1136,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
          * If an RCU GP has gone long enough, go check for dyntick
          * idle CPUs and, if needed, send resched IPIs.
          */
-        if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-            (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+        if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
                 force_quiescent_state(rsp, 1);
 
         /*
@@ -1230,8 +1221,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
         if (unlikely(++rdp->qlen > qhimark)) {
                 rdp->blimit = LONG_MAX;
                 force_quiescent_state(rsp, 0);
-        } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-                   (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0)
+        } else if ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0)
                 force_quiescent_state(rsp, 1);
         local_irq_restore(flags);
 }
@@ -1290,8 +1280,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
 
         /* Has an RCU GP gone long enough to send resched IPIs &c? */
         if (ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum) &&
-            ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0 ||
-             (rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending) < 0))
+            ((long)(ACCESS_ONCE(rsp->jiffies_force_qs) - jiffies) < 0))
                 return 1;
 
         /* nothing to do */
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 4ee954f6a8d5..4b1875ba9404 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -49,14 +49,12 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
 {
         if (!rdp->beenonline)
                 return;
-        seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d rpfq=%ld rp=%x",
+        seq_printf(m, "%3d%cc=%ld g=%ld pq=%d pqc=%ld qp=%d",
                    rdp->cpu,
                    cpu_is_offline(rdp->cpu) ? '!' : ' ',
                    rdp->completed, rdp->gpnum,
                    rdp->passed_quiesc, rdp->passed_quiesc_completed,
-                   rdp->qs_pending,
-                   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-                   (int)(rdp->n_rcu_pending & 0xffff));
+                   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
         seq_printf(m, " dt=%d/%d dn=%d df=%lu",
                    rdp->dynticks->dynticks,
@@ -102,14 +100,12 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 {
         if (!rdp->beenonline)
                 return;
-        seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d,%ld,%ld",
+        seq_printf(m, "%d,%s,%ld,%ld,%d,%ld,%d",
                    rdp->cpu,
                    cpu_is_offline(rdp->cpu) ? "\"Y\"" : "\"N\"",
                    rdp->completed, rdp->gpnum,
                    rdp->passed_quiesc, rdp->passed_quiesc_completed,
-                   rdp->qs_pending,
-                   rdp->n_rcu_pending_force_qs - rdp->n_rcu_pending,
-                   rdp->n_rcu_pending);
+                   rdp->qs_pending);
 #ifdef CONFIG_NO_HZ
         seq_printf(m, ",%d,%d,%d,%lu",
                    rdp->dynticks->dynticks,
@@ -123,7 +119,7 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
 
 static int show_rcudata_csv(struct seq_file *m, void *unused)
 {
-        seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",\"rpfq\",\"rp\",");
+        seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pqc\",\"pq\",");
 #ifdef CONFIG_NO_HZ
         seq_puts(m, "\"dt\",\"dt nesting\",\"dn\",\"df\",");
 #endif /* #ifdef CONFIG_NO_HZ */
diff --git a/kernel/resource.c b/kernel/resource.c
index fd5d7d574bb9..ac5f3a36923f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root,
         res->end = end;
         res->flags = IORESOURCE_BUSY;
 
-        for (;;) {
-                conflict = __request_resource(parent, res);
-                if (!conflict)
-                        break;
-                if (conflict != parent) {
-                        parent = conflict;
-                        if (!(conflict->flags & IORESOURCE_BUSY))
-                                continue;
-                }
-
-                /* Uhhuh, that didn't work out.. */
-                kfree(res);
-                res = NULL;
-                break;
-        }
-
-        if (!res) {
-                /* failed, split and try again */
-
-                /* conflict covered whole area */
-                if (conflict->start <= start && conflict->end >= end)
-                        return;
+        conflict = __request_resource(parent, res);
+        if (!conflict)
+                return;
 
-                if (conflict->start > start)
-                        __reserve_region_with_split(root, start, conflict->start-1, name);
-                if (!(conflict->flags & IORESOURCE_BUSY)) {
-                        resource_size_t common_start, common_end;
+        /* failed, split and try again */
+        kfree(res);
 
-                        common_start = max(conflict->start, start);
-                        common_end = min(conflict->end, end);
-                        if (common_start < common_end)
-                                __reserve_region_with_split(root, common_start, common_end, name);
-                }
-                if (conflict->end < end)
-                        __reserve_region_with_split(root, conflict->end+1, end, name);
-        }
+        /* conflict covered whole area */
+        if (conflict->start <= start && conflict->end >= end)
+                return;
 
+        if (conflict->start > start)
+                __reserve_region_with_split(root, start, conflict->start-1, name);
+        if (conflict->end < end)
+                __reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
diff --git a/kernel/sched.c b/kernel/sched.c
index 5724508c3b66..26efa475bdc1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4732,7 +4732,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
         if (user_tick)
                 account_user_time(p, one_jiffy, one_jiffy_scaled);
-        else if (p != rq->idle)
+        else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
                 account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
                                     one_jiffy_scaled);
         else
@@ -4846,7 +4846,7 @@ void scheduler_tick(void)
 #endif
 }
 
-unsigned long get_parent_ip(unsigned long addr)
+notrace unsigned long get_parent_ip(unsigned long addr)
 {
         if (in_lock_functions(addr)) {
                 addr = CALLER_ADDR2;
@@ -7367,8 +7367,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
                 cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
                 cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
-                printk(KERN_CONT " %s (__cpu_power = %d)", str,
-                                                group->__cpu_power);
+
+                printk(KERN_CONT " %s", str);
+                if (group->__cpu_power != SCHED_LOAD_SCALE) {
+                        printk(KERN_CONT " (__cpu_power = %d)",
+                                group->__cpu_power);
+                }
 
                 group = group->next;
         } while (group != sd->groups);
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index cf2bc01186ef..b28d19135f43 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -609,14 +609,14 @@ void slow_work_unregister_user(void)
         if (slow_work_user_count == 0) {
                 printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
                 slow_work_threads_should_exit = true;
+                del_timer_sync(&slow_work_cull_timer);
+                del_timer_sync(&slow_work_oom_timer);
                 wake_up_all(&slow_work_thread_wq);
                 wait_for_completion(&slow_work_last_thread_exited);
                 printk(KERN_NOTICE "Slow work thread pool:"
                        " Shut down complete\n");
         }
 
-        del_timer_sync(&slow_work_cull_timer);
-
         mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 2fecefacdc5b..b525dd348511 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -472,9 +472,9 @@ void tasklet_kill(struct tasklet_struct *t)
                 printk("Attempt to kill tasklet from interrupt\n");
 
         while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
-                do
+                do {
                         yield();
-                while (test_bit(TASKLET_STATE_SCHED, &t->state));
+                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
         }
         tasklet_unlock_wait(t);
         clear_bit(TASKLET_STATE_SCHED, &t->state);
diff --git a/kernel/sys.c b/kernel/sys.c
index 51dbb55604e8..e7998cf31498 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -360,6 +360,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
                 void __user *, arg)
 {
         char buffer[256];
+        int ret = 0;
 
         /* We only trust the superuser with rebooting the system. */
         if (!capable(CAP_SYS_BOOT))
@@ -397,7 +398,7 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
                 kernel_halt();
                 unlock_kernel();
                 do_exit(0);
-                break;
+                panic("cannot halt");
 
         case LINUX_REBOOT_CMD_POWER_OFF:
                 kernel_power_off();
@@ -417,29 +418,22 @@ SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
 
 #ifdef CONFIG_KEXEC
         case LINUX_REBOOT_CMD_KEXEC:
-        {
-                int ret;
-                ret = kernel_kexec();
-                unlock_kernel();
-                return ret;
-        }
+                ret = kernel_kexec();
+                break;
 #endif
 
 #ifdef CONFIG_HIBERNATION
         case LINUX_REBOOT_CMD_SW_SUSPEND:
-        {
-                int ret = hibernate();
-                unlock_kernel();
-                return ret;
-        }
+                ret = hibernate();
+                break;
 #endif
 
         default:
-                unlock_kernel();
-                return -EINVAL;
+                ret = -EINVAL;
+                break;
         }
         unlock_kernel();
-        return 0;
+        return ret;
 }
 
 static void deferred_cad(struct work_struct *dummy)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4286b62b34a0..b2970d56fb76 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -101,7 +101,9 @@ static int __maybe_unused one = 1;
 static int __maybe_unused two = 2;
 static unsigned long one_ul = 1;
 static int one_hundred = 100;
-static int one_thousand = 1000;
+
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
 
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
@@ -902,16 +904,6 @@ static struct ctl_table kern_table[] = {
                 .proc_handler = &proc_dointvec,
         },
 #endif
-#ifdef CONFIG_UNEVICTABLE_LRU
-        {
-                .ctl_name = CTL_UNNUMBERED,
-                .procname = "scan_unevictable_pages",
-                .data = &scan_unevictable_pages,
-                .maxlen = sizeof(scan_unevictable_pages),
-                .mode = 0644,
-                .proc_handler = &scan_unevictable_handler,
-        },
-#endif
 #ifdef CONFIG_SLOW_WORK
         {
                 .ctl_name = CTL_UNNUMBERED,
@@ -1016,7 +1008,7 @@ static struct ctl_table vm_table[] = {
                 .mode = 0644,
                 .proc_handler = &dirty_bytes_handler,
                 .strategy = &sysctl_intvec,
-                .extra1 = &one_ul,
+                .extra1 = &dirty_bytes_min,
         },
         {
                 .procname = "dirty_writeback_centisecs",
@@ -1041,28 +1033,6 @@ static struct ctl_table vm_table[] = {
                 .proc_handler = &proc_dointvec,
         },
         {
-                .ctl_name = CTL_UNNUMBERED,
-                .procname = "nr_pdflush_threads_min",
-                .data = &nr_pdflush_threads_min,
-                .maxlen = sizeof nr_pdflush_threads_min,
-                .mode = 0644 /* read-write */,
-                .proc_handler = &proc_dointvec_minmax,
-                .strategy = &sysctl_intvec,
-                .extra1 = &one,
-                .extra2 = &nr_pdflush_threads_max,
-        },
-        {
-                .ctl_name = CTL_UNNUMBERED,
-                .procname = "nr_pdflush_threads_max",
-                .data = &nr_pdflush_threads_max,
-                .maxlen = sizeof nr_pdflush_threads_max,
-                .mode = 0644 /* read-write */,
-                .proc_handler = &proc_dointvec_minmax,
-                .strategy = &sysctl_intvec,
-                .extra1 = &nr_pdflush_threads_min,
-                .extra2 = &one_thousand,
-        },
-        {
                 .ctl_name = VM_SWAPPINESS,
                 .procname = "swappiness",
                 .data = &vm_swappiness,
@@ -1302,6 +1272,16 @@ static struct ctl_table vm_table[] = {
                 .extra2 = &one,
         },
 #endif
+#ifdef CONFIG_UNEVICTABLE_LRU
+        {
+                .ctl_name = CTL_UNNUMBERED,
+                .procname = "scan_unevictable_pages",
+                .data = &scan_unevictable_pages,
+                .maxlen = sizeof(scan_unevictable_pages),
+                .mode = 0644,
+                .proc_handler = &scan_unevictable_handler,
+        },
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931a7fe7..ecfd7b5187e0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 
         resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-        wdnow = watchdog->read();
+        wdnow = watchdog->read(watchdog);
         wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
         watchdog_last = wdnow;
 
         list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-                csnow = cs->read();
+                csnow = cs->read(cs);
 
                 if (unlikely(resumed)) {
                         cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 
                 list_add(&cs->wd_list, &watchdog_list);
                 if (!started && watchdog) {
-                        watchdog_last = watchdog->read();
+                        watchdog_last = watchdog->read(watchdog);
                         watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
                         add_timer_on(&watchdog_timer,
                                      cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
                                 cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
                         /* Start if list is not empty */
                         if (!list_empty(&watchdog_list)) {
-                                watchdog_last = watchdog->read();
+                                watchdog_last = watchdog->read(watchdog);
                                 watchdog_timer.expires =
                                         jiffies + WATCHDOG_INTERVAL;
                                 add_timer_on(&watchdog_timer,
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f197560f3b..c3f6c30816e3 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
  */
 #define JIFFIES_SHIFT 8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
         return (cycle_t) jiffies;
 }
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 21a5ca849514..83c4417b6a3c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
         for (;;) {
                 if (!clockevents_program_event(dev, next, ktime_get()))
                         return;
-                tick_periodic(cpu);
+                /*
+                 * Have to be careful here. If we're in oneshot mode,
+                 * before we call tick_periodic() in a loop, we need
+                 * to be sure we're using a real hardware clocksource.
+                 * Otherwise we could get trapped in an infinite
+                 * loop, as the tick_periodic() increments jiffies,
+                 * when then will increment time, posibly causing
+                 * the loop to trigger again and again.
+                 */
+                if (timekeeping_valid_for_hres())
+                        tick_periodic(cpu);
                 next = ktime_add(next, tick_period);
         }
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6598d1..687dff49f6e7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
  */
 static void change_clocksource(void)
 {
-        struct clocksource *new;
+        struct clocksource *new, *old;
 
         new = clocksource_get_next();
 
@@ -191,11 +191,16 @@ static void change_clocksource(void)
 
         clocksource_forward_now();
 
-        new->raw_time = clock->raw_time;
+        if (clocksource_enable(new))
+                return;
 
+        new->raw_time = clock->raw_time;
+        old = clock;
         clock = new;
+        clocksource_disable(old);
+
         clock->cycle_last = 0;
-        clock->cycle_last = clocksource_read(new);
+        clock->cycle_last = clocksource_read(clock);
         clock->error = 0;
         clock->xtime_nsec = 0;
         clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
         ntp_init();
 
         clock = clocksource_get_next();
+        clocksource_enable(clock);
         clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
         clock->cycle_last = clocksource_read(clock);
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ce5dc6372b8..a884c09006c4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3448,6 +3448,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
                 if (!ref)
                         break;
 
+                ref->ref = 1;
                 ref->buffer = info->tr->buffer;
                 ref->page = ring_buffer_alloc_read_page(ref->buffer);
                 if (!ref->page) {
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
index ad8c22efff41..8333715e4066 100644
--- a/kernel/trace/trace_branch.c
+++ b/kernel/trace/trace_branch.c
@@ -155,6 +155,13 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
         return TRACE_TYPE_HANDLED;
 }
 
+static void branch_print_header(struct seq_file *s)
+{
+        seq_puts(s, "#           TASK-PID    CPU#    TIMESTAMP  CORRECT"
+                "  FUNC:FILE:LINE\n");
+        seq_puts(s, "#              | |       |          |         |"
+                "    |\n");
+}
 
 static struct trace_event trace_branch_event = {
         .type = TRACE_BRANCH,
@@ -169,6 +176,7 @@ static struct tracer branch_trace __read_mostly =
 #ifdef CONFIG_FTRACE_SELFTEST
         .selftest = trace_selftest_startup_branch,
 #endif /* CONFIG_FTRACE_SELFTEST */
+        .print_header = branch_print_header,
 };
 
 __init static int init_branch_tracer(void)
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c
index bae791ebcc51..118439709fb7 100644
--- a/kernel/trace/trace_power.c
+++ b/kernel/trace/trace_power.c
@@ -186,6 +186,12 @@ static enum print_line_t power_print_line(struct trace_iterator *iter)
         return TRACE_TYPE_UNHANDLED;
 }
 
+static void power_print_header(struct seq_file *s)
+{
+        seq_puts(s, "#   TIMESTAMP      STATE  EVENT\n");
+        seq_puts(s, "#       |            |      |\n");
+}
+
 static struct tracer power_tracer __read_mostly =
 {
         .name = "power",
@@ -194,6 +200,7 @@ static struct tracer power_tracer __read_mostly =
         .stop = stop_power_trace,
         .reset = power_trace_reset,
         .print_line = power_print_line,
+        .print_header = power_print_header,
 };
 
 static int init_power_trace(void)