author     Ingo Molnar <mingo@elte.hu>    2009-05-11 03:33:06 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-05-11 03:50:02 -0400
commit     7a309490da98981558a07183786201f02a6341e2 (patch)
tree       204bfd3bc344dbb02be0b1eac29b956f6722e661 /kernel
parent     9a8709d44139748fe2e0ab56d20d8c384c8b65ad (diff)
parent     091bf7624d1c90cec9e578a18529f615213ff847 (diff)

Merge commit 'v2.6.30-rc5' into x86/apic

Merge reason: this branch was on a .30-rc2 base - sync it up with all the
latest fixes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')

-rw-r--r--   kernel/audit_tree.c            3
-rw-r--r--   kernel/auditfilter.c           4
-rw-r--r--   kernel/irq/handle.c            2
-rw-r--r--   kernel/irq/manage.c            5
-rw-r--r--   kernel/kprobes.c              31
-rw-r--r--   kernel/lockdep.c              22
-rw-r--r--   kernel/panic.c                15
-rw-r--r--   kernel/posix-cpu-timers.c      8
-rw-r--r--   kernel/power/disk.c           51
-rw-r--r--   kernel/power/main.c           24
-rw-r--r--   kernel/ptrace.c                4
-rw-r--r--   kernel/resource.c             46
-rw-r--r--   kernel/sched.c                 2
-rw-r--r--   kernel/slow-work.c             4
-rw-r--r--   kernel/sysctl.c                5
-rw-r--r--   kernel/time/clocksource.c      8
-rw-r--r--   kernel/time/jiffies.c          2
-rw-r--r--   kernel/time/tick-common.c     12
-rw-r--r--   kernel/time/timekeeping.c     12
-rw-r--r--   kernel/trace/trace.c           1

20 files changed, 146 insertions(+), 115 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab9525568..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
 		dentry = dget(path.dentry);
 		path_put(&path);
 
-		if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-			follow_up(&mnt, &dentry);
-
 		list_add_tail(&list, &tagged->mnt_list);
 
 		mutex_lock(&audit_filter_mutex);
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index a6fe71fd5d1b..713098ee5a02 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1028,7 +1028,7 @@ static void audit_update_watch(struct audit_parent *parent,
 
 	if (audit_enabled) {
 		struct audit_buffer *ab;
-		ab = audit_log_start(NULL, GFP_KERNEL,
+		ab = audit_log_start(NULL, GFP_NOFS,
 			AUDIT_CONFIG_CHANGE);
 		audit_log_format(ab, "auid=%u ses=%u",
 			audit_get_loginuid(current),
@@ -1067,7 +1067,7 @@ static void audit_remove_parent_watches(struct audit_parent *parent)
 		e = container_of(r, struct audit_entry, rule);
 		if (audit_enabled) {
 			struct audit_buffer *ab;
-			ab = audit_log_start(NULL, GFP_KERNEL,
+			ab = audit_log_start(NULL, GFP_NOFS,
 				AUDIT_CONFIG_CHANGE);
 			audit_log_format(ab, "auid=%u ses=%u",
 				audit_get_loginuid(current),
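
Why GFP_NOFS: audit_update_watch() and audit_remove_parent_watches() are reached from the inotify event path, where filesystem locks can already be held, so a GFP_KERNEL allocation could recurse into filesystem reclaim and deadlock. That is the usual reason for this flag, and presumably the one here. A minimal sketch of the rule (the helper name is hypothetical, not part of this patch):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* hypothetical helper: allocating while fs locks are held */
    static void *audit_alloc_on_fs_path(size_t len)
    {
            /*
             * GFP_NOFS may sleep, but will not re-enter filesystem
             * code during reclaim, so it cannot deadlock on locks
             * the caller already holds; GFP_KERNEL gives no such
             * guarantee.
             */
            return kmalloc(len, GFP_NOFS);
    }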
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index d82142be8dd2..26e08754744f 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -363,8 +363,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
-	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
-
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd4cd2f..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT)
 		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index a5e74ddee0e2..c0fa54b276d9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -319,6 +319,22 @@ struct kprobe __kprobes *get_kprobe(void *addr)
 	return NULL;
 }
 
+/* Arm a kprobe with text_mutex */
+static void __kprobes arm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_arm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
+/* Disarm a kprobe with text_mutex */
+static void __kprobes disarm_kprobe(struct kprobe *kp)
+{
+	mutex_lock(&text_mutex);
+	arch_disarm_kprobe(kp);
+	mutex_unlock(&text_mutex);
+}
+
 /*
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
@@ -538,7 +554,7 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 		ap->flags &= ~KPROBE_FLAG_DISABLED;
 		if (!kprobes_all_disarmed)
 			/* Arm the breakpoint again. */
-			arch_arm_kprobe(ap);
+			arm_kprobe(ap);
 	}
 	return 0;
 }
@@ -789,11 +805,8 @@ static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 		 * enabled and not gone - otherwise, the breakpoint would
 		 * already have been removed. We save on flushing icache.
 		 */
-		if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
-			mutex_lock(&text_mutex);
-			arch_disarm_kprobe(p);
-			mutex_unlock(&text_mutex);
-		}
+		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
+			disarm_kprobe(p);
 		hlist_del_rcu(&old_p->hlist);
 	} else {
 		if (p->break_handler && !kprobe_gone(p))
@@ -810,7 +823,7 @@ noclean:
 		if (!kprobe_disabled(old_p)) {
 			try_to_disable_aggr_kprobe(old_p);
 			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
-				arch_disarm_kprobe(old_p);
+				disarm_kprobe(old_p);
 		}
 	}
 	return 0;
@@ -1364,7 +1377,7 @@ int __kprobes disable_kprobe(struct kprobe *kp)
 	try_to_disable_aggr_kprobe(p);
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_disarm_kprobe(p);
+		disarm_kprobe(p);
 out:
 	mutex_unlock(&kprobe_mutex);
 	return ret;
@@ -1393,7 +1406,7 @@ int __kprobes enable_kprobe(struct kprobe *kp)
 	}
 
 	if (!kprobes_all_disarmed && kprobe_disabled(p))
-		arch_arm_kprobe(p);
+		arm_kprobe(p);
 
 	p->flags &= ~KPROBE_FLAG_DISABLED;
 	if (p != kp)
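
The new arm_kprobe()/disarm_kprobe() helpers fold the text_mutex locking into one place; the remaining hunks in this file are mechanical conversions of call sites. A sketch of the effect at a hypothetical call site (toggle_probe() is illustrative, not part of the patch):

    /*
     * Arming and disarming now serialize on text_mutex inside the
     * helpers, so no call site can forget the lock around the
     * arch_arm_kprobe()/arch_disarm_kprobe() text patching.
     */
    static void toggle_probe(struct kprobe *kp, bool on)
    {
            if (on)
                    arm_kprobe(kp);
            else
                    disarm_kprobe(kp);
    }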
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b0f011866969..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
diff --git a/kernel/panic.c b/kernel/panic.c
index 934fb377f4b3..874ecf1307ae 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -221,7 +221,7 @@ void add_taint(unsigned flag)
 	 *  post-warning case.
 	 */
 	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
 
 	set_bit(flag, &tainted_mask);
 }
@@ -340,7 +340,7 @@ void oops_exit(void)
 }
 
 #ifdef WANT_WARN_ON_SLOWPATH
-void warn_slowpath(const char *file, int line, const char *fmt, ...)
+void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
 {
 	va_list args;
 	char function[KSYM_SYMBOL_LEN];
@@ -356,7 +356,7 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
 	if (board)
 		printk(KERN_WARNING "Hardware name: %s\n", board);
 
-	if (fmt) {
+	if (*fmt) {
 		va_start(args, fmt);
 		vprintk(fmt, args);
 		va_end(args);
@@ -367,7 +367,14 @@ void warn_slowpath(const char *file, int line, const char *fmt, ...)
 	print_oops_end_marker();
 	add_taint(TAINT_WARN);
 }
-EXPORT_SYMBOL(warn_slowpath);
+EXPORT_SYMBOL(warn_slowpath_fmt);
+
+void warn_slowpath_null(const char *file, int line)
+{
+	static const char *empty = "";
+	warn_slowpath_fmt(file, line, empty);
+}
+EXPORT_SYMBOL(warn_slowpath_null);
 #endif
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c9dcf98b4463..bece7c0b67b2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1420,19 +1420,19 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * timer call will interfere.
 	 */
 	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
-		int firing;
+		int cpu_firing;
+
 		spin_lock(&timer->it_lock);
 		list_del_init(&timer->it.cpu.entry);
-		firing = timer->it.cpu.firing;
+		cpu_firing = timer->it.cpu.firing;
 		timer->it.cpu.firing = 0;
 		/*
 		 * The firing flag is -1 if we collided with a reset
 		 * of the timer, which already reported this
 		 * almost-firing as an overrun. So don't generate an event.
 		 */
-		if (likely(firing >= 0)) {
+		if (likely(cpu_firing >= 0))
 			cpu_timer_fire(timer);
-		}
 		spin_unlock(&timer->it_lock);
 	}
 }
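
The rename is not cosmetic: the loop iterates &firing, a list head declared earlier in run_posix_cpu_timers(), and the inner int firing shadowed it. A standalone illustration of that bug class (plain C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            int firing = 1;                 /* outer object, like the list head */
            {
                    int firing = -1;        /* inner declaration shadows it */
                    printf("inner firing = %d\n", firing);
            }
            printf("outer firing = %d\n", firing);  /* still 1 */
            return 0;
    }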
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0854770b63b9..e71ca9cd81b2 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -646,13 +646,6 @@ static int software_resume(void)
 		return 0;
 
 	/*
-	 * We can't depend on SCSI devices being available after loading one of
-	 * their modules if scsi_complete_async_scans() is not called and the
-	 * resume device usually is a SCSI one.
-	 */
-	scsi_complete_async_scans();
-
-	/*
 	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
 	 * is configured into the kernel. Since the regular hibernate
 	 * trigger path is via sysfs which takes a buffer mutex before
@@ -663,32 +656,42 @@ static int software_resume(void)
 	 * here to avoid lockdep complaining.
 	 */
 	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+	if (swsusp_resume_device)
+		goto Check_image;
+
+	if (!strlen(resume_file)) {
+		error = -ENOENT;
+		goto Unlock;
+	}
+
+	pr_debug("PM: Checking image partition %s\n", resume_file);
+
+	/* Check if the device is there */
+	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
-		if (!strlen(resume_file)) {
-			mutex_unlock(&pm_mutex);
-			return -ENOENT;
-		}
 		/*
 		 * Some device discovery might still be in progress; we need
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
+
 		swsusp_resume_device = name_to_dev_t(resume_file);
-		pr_debug("PM: Resume from partition %s\n", resume_file);
-	} else {
-		pr_debug("PM: Resume from partition %d:%d\n",
-				MAJOR(swsusp_resume_device),
-				MINOR(swsusp_resume_device));
+		if (!swsusp_resume_device) {
+			error = -ENODEV;
+			goto Unlock;
+		}
 	}
 
-	if (noresume) {
-		/**
-		 * FIXME: If noresume is specified, we need to find the
-		 * partition and reset it back to normal swap space.
-		 */
-		mutex_unlock(&pm_mutex);
-		return 0;
-	}
+ Check_image:
+	pr_debug("PM: Resume from partition %d:%d\n",
+		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
 	pr_debug("PM: Checking hibernation image.\n");
 	error = swsusp_check();
diff --git a/kernel/power/main.c b/kernel/power/main.c
index f172f41858bb..f99ed6a75eac 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -291,20 +291,26 @@ static int suspend_enter(suspend_state_t state)
 
 	device_pm_lock();
 
+	if (suspend_ops->prepare) {
+		error = suspend_ops->prepare();
+		if (error)
+			goto Done;
+	}
+
 	error = device_power_down(PMSG_SUSPEND);
 	if (error) {
 		printk(KERN_ERR "PM: Some devices failed to power down\n");
-		goto Done;
+		goto Platfrom_finish;
 	}
 
-	if (suspend_ops->prepare) {
-		error = suspend_ops->prepare();
+	if (suspend_ops->prepare_late) {
+		error = suspend_ops->prepare_late();
 		if (error)
 			goto Power_up_devices;
 	}
 
 	if (suspend_test(TEST_PLATFORM))
-		goto Platfrom_finish;
+		goto Platform_wake;
 
 	error = disable_nonboot_cpus();
 	if (error || suspend_test(TEST_CPUS))
@@ -326,13 +332,17 @@ static int suspend_enter(suspend_state_t state)
  Enable_cpus:
 	enable_nonboot_cpus();
 
- Platfrom_finish:
-	if (suspend_ops->finish)
-		suspend_ops->finish();
+ Platform_wake:
+	if (suspend_ops->wake)
+		suspend_ops->wake();
 
  Power_up_devices:
 	device_power_up(PMSG_RESUME);
 
+ Platfrom_finish:
+	if (suspend_ops->finish)
+		suspend_ops->finish();
+
  Done:
 	device_pm_unlock();
 
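
Taken together, the two hunks introduce the prepare_late/wake pair and move device power-down inside the platform prepare/finish bracket. Reconstructed from the hunks above, suspend_enter() now calls, in order (failures unwind through the labels shown; the misspelled Platfrom_finish label is in the actual source):

    suspend_ops->prepare()            /* failure -> Done */
    device_power_down(PMSG_SUSPEND)   /* failure -> Platfrom_finish */
    suspend_ops->prepare_late()       /* failure -> Power_up_devices */
    disable_nonboot_cpus(), enter, enable_nonboot_cpus()
    suspend_ops->wake()               /* Platform_wake: */
    device_power_up(PMSG_RESUME)      /* Power_up_devices: */
    suspend_ops->finish()             /* Platfrom_finish: */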
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index dfcd83ceee3b..0692ab5a0d67 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
 	/* Protect exec's credential calculations against our interference;
 	 * SUID, SGID and LSM creds get determined differently under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
 	if (retval < 0)
 		goto out;
 
@@ -232,7 +232,7 @@ repeat:
 bad:
 	write_unlock_irqrestore(&tasklist_lock, flags);
 	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+	mutex_unlock(&task->cred_exec_mutex);
 out:
 	return retval;
 }
diff --git a/kernel/resource.c b/kernel/resource.c
index fd5d7d574bb9..ac5f3a36923f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -533,43 +533,21 @@ static void __init __reserve_region_with_split(struct resource *root,
 	res->end = end;
 	res->flags = IORESOURCE_BUSY;
 
-	for (;;) {
-		conflict = __request_resource(parent, res);
-		if (!conflict)
-			break;
-		if (conflict != parent) {
-			parent = conflict;
-			if (!(conflict->flags & IORESOURCE_BUSY))
-				continue;
-		}
-
-		/* Uhhuh, that didn't work out.. */
-		kfree(res);
-		res = NULL;
-		break;
-	}
-
-	if (!res) {
-		/* failed, split and try again */
-
-		/* conflict covered whole area */
-		if (conflict->start <= start && conflict->end >= end)
-			return;
+	conflict = __request_resource(parent, res);
+	if (!conflict)
+		return;
 
-		if (conflict->start > start)
-			__reserve_region_with_split(root, start, conflict->start-1, name);
-		if (!(conflict->flags & IORESOURCE_BUSY)) {
-			resource_size_t common_start, common_end;
+	/* failed, split and try again */
+	kfree(res);
 
-			common_start = max(conflict->start, start);
-			common_end = min(conflict->end, end);
-			if (common_start < common_end)
-				__reserve_region_with_split(root, common_start, common_end, name);
-		}
-		if (conflict->end < end)
-			__reserve_region_with_split(root, conflict->end+1, end, name);
-	}
+	/* conflict covered whole area */
+	if (conflict->start <= start && conflict->end >= end)
+		return;
 
+	if (conflict->start > start)
+		__reserve_region_with_split(root, start, conflict->start-1, name);
+	if (conflict->end < end)
+		__reserve_region_with_split(root, conflict->end+1, end, name);
 }
 
 void __init reserve_region_with_split(struct resource *root,
diff --git a/kernel/sched.c b/kernel/sched.c
index b902e587a3a0..26efa475bdc1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4732,7 +4732,7 @@ void account_process_tick(struct task_struct *p, int user_tick)
 
 	if (user_tick)
 		account_user_time(p, one_jiffy, one_jiffy_scaled);
-	else if (p != rq->idle)
+	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
 		account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
 				    one_jiffy_scaled);
 	else
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index cf2bc01186ef..b28d19135f43 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -609,14 +609,14 @@ void slow_work_unregister_user(void)
 	if (slow_work_user_count == 0) {
 		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
 		slow_work_threads_should_exit = true;
+		del_timer_sync(&slow_work_cull_timer);
+		del_timer_sync(&slow_work_oom_timer);
 		wake_up_all(&slow_work_thread_wq);
 		wait_for_completion(&slow_work_last_thread_exited);
 		printk(KERN_NOTICE "Slow work thread pool:"
 		       " Shut down complete\n");
 	}
 
-	del_timer_sync(&slow_work_cull_timer);
-
 	mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e3d2c7dd59b9..ea78fa101ad6 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -103,6 +103,9 @@ static unsigned long one_ul = 1;
 static int one_hundred = 100;
 static int one_thousand = 1000;
 
+/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
+static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
+
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
@@ -1006,7 +1009,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &dirty_bytes_handler,
 		.strategy	= &sysctl_intvec,
-		.extra1		= &one_ul,
+		.extra1		= &dirty_bytes_min,
 	},
 	{
 		.procname	= "dirty_writeback_centisecs",
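
The new 2 * PAGE_SIZE floor presumably guards the dirty-throttling arithmetic: with the old &one_ul bound, vm_dirty_bytes could be set as low as 1, the page-count threshold derived from it truncates to zero, and later ratio calculations can divide by zero. A userspace sketch of the truncation (PAGE_SIZE assumed to be 4096 here):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
            unsigned long vm_dirty_bytes = 1;       /* old minimum */
            unsigned long dirty_pages = vm_dirty_bytes / PAGE_SIZE;

            /* 1 / 4096 truncates to 0; any later division by this
             * page-count threshold then faults */
            printf("dirty_pages = %lu\n", dirty_pages);
            return 0;
    }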
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931a7fe7..ecfd7b5187e0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 
 	resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-	wdnow = watchdog->read();
+	wdnow = watchdog->read(watchdog);
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-		csnow = cs->read();
+		csnow = cs->read(cs);
 
 		if (unlikely(resumed)) {
 			cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 
 		list_add(&cs->wd_list, &watchdog_list);
 		if (!started && watchdog) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
 				     cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 				cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
 			/* Start if list is not empty */
 			if (!list_empty(&watchdog_list)) {
-				watchdog_last = watchdog->read();
+				watchdog_last = watchdog->read(watchdog);
 				watchdog_timer.expires =
 					jiffies + WATCHDOG_INTERVAL;
 				add_timer_on(&watchdog_timer,
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f197560f3b..c3f6c30816e3 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
  */
 #define JIFFIES_SHIFT	8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
 	return (cycle_t) jiffies;
 }
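
Passing the struct clocksource pointer into ->read() (here and in clocksource.c above) lets a driver embed the generic clocksource in its own device state and recover it with container_of(); the jiffies clocksource simply ignores the argument. A sketch of the pattern the new signature enables (my_timer is a hypothetical driver, not part of this commit):

    #include <linux/clocksource.h>
    #include <linux/io.h>
    #include <linux/kernel.h>

    /* hypothetical memory-mapped timer driver */
    struct my_timer {
            void __iomem *counter;
            struct clocksource cs;
    };

    static cycle_t my_timer_read(struct clocksource *cs)
    {
            struct my_timer *mt = container_of(cs, struct my_timer, cs);

            return (cycle_t) readl(mt->counter);
    }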
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 21a5ca849514..83c4417b6a3c 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -93,7 +93,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
 	for (;;) {
 		if (!clockevents_program_event(dev, next, ktime_get()))
 			return;
-		tick_periodic(cpu);
+		/*
+		 * Have to be careful here. If we're in oneshot mode,
+		 * before we call tick_periodic() in a loop, we need
+		 * to be sure we're using a real hardware clocksource.
+		 * Otherwise we could get trapped in an infinite loop,
+		 * as tick_periodic() increments jiffies, which then
+		 * will increment time, possibly causing the loop to
+		 * trigger again and again.
+		 */
+		if (timekeeping_valid_for_hres())
+			tick_periodic(cpu);
 		next = ktime_add(next, tick_period);
 	}
 }
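
The guard matters when the only usable clocksource is the jiffies one: time then advances solely because tick_periodic() increments jiffies, so the event reprogrammed for next can never land in the future and the loop never exits. A standalone sketch of that feedback loop (plain C, bounded here only for demonstration):

    #include <stdio.h>

    static unsigned long jiffies_sim = 100;

    /* with only the jiffies clocksource, "now" is jiffies itself */
    static unsigned long now(void)
    {
            return jiffies_sim;
    }

    int main(void)
    {
            unsigned long next = 100;       /* event already due */
            int i;

            for (i = 0; i < 5; i++) {       /* the kernel loop is unbounded */
                    if (next > now()) {
                            puts("event lands in the future: done");
                            return 0;
                    }
                    jiffies_sim++;  /* tick_periodic() advances jiffies... */
                    next++;         /* ...so next never gets ahead of now() */
            }
            puts("still looping: next can never pass now()");
            return 0;
    }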
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6598d1..687dff49f6e7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
  */
 static void change_clocksource(void)
 {
-	struct clocksource *new;
+	struct clocksource *new, *old;
 
 	new = clocksource_get_next();
 
@@ -191,11 +191,16 @@ static void change_clocksource(void)
 
 	clocksource_forward_now();
 
-	new->raw_time = clock->raw_time;
+	if (clocksource_enable(new))
+		return;
 
+	new->raw_time = clock->raw_time;
+	old = clock;
 	clock = new;
+	clocksource_disable(old);
+
 	clock->cycle_last = 0;
-	clock->cycle_last = clocksource_read(new);
+	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
 	ntp_init();
 
 	clock = clocksource_get_next();
+	clocksource_enable(clock);
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
 	clock->cycle_last = clocksource_read(clock);
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1ce5dc6372b8..a884c09006c4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3448,6 +3448,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		if (!ref)
 			break;
 
+		ref->ref = 1;
 		ref->buffer = info->tr->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer);
 		if (!ref->page) {