Diffstat (limited to 'kernel')

 kernel/audit_tree.c       |  3
 kernel/irq/manage.c       |  5
 kernel/lockdep.c          | 22
 kernel/panic.c            |  2
 kernel/power/disk.c       | 51
 kernel/ptrace.c           |  4
 kernel/slow-work.c        |  4
 kernel/time/clocksource.c |  8
 kernel/time/jiffies.c     |  2
 kernel/time/timekeeping.c | 12

 10 files changed, 62 insertions(+), 51 deletions(-)
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 917ab9525568..6e7351739a82 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -734,9 +734,6 @@ int audit_tag_tree(char *old, char *new)
 		dentry = dget(path.dentry);
 		path_put(&path);
 
-		if (dentry == tagged->mnt_root && dentry == mnt->mnt_root)
-			follow_up(&mnt, &dentry);
-
 		list_add_tail(&list, &tagged->mnt_list);
 
 		mutex_lock(&audit_filter_mutex);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7e2e7dd4cd2f..2734eca59243 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,10 +109,9 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
-		cpumask_copy(desc->affinity, cpumask);
+	if (desc->status & IRQ_MOVE_PCNTXT)
 		desc->chip->set_affinity(irq, cpumask);
-	} else {
+	else {
 		desc->status |= IRQ_MOVE_PENDING;
 		cpumask_copy(desc->pending_mask, cpumask);
 	}
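
Note: with CONFIG_GENERIC_PENDING_IRQ, an affinity request that cannot be applied in process context is only recorded in desc->pending_mask; the real mask write is deferred to interrupt context. A minimal sketch of the consumer side, modeled loosely on the kernel's move_masked_irq() (the function name here is hypothetical, not part of this diff):

	/*
	 * Sketch: consuming a deferred affinity request from the interrupt
	 * path. Relies on the IRQ_MOVE_PENDING flag and pending_mask set by
	 * irq_set_affinity() above.
	 */
	void apply_pending_affinity(struct irq_desc *desc, unsigned int irq)
	{
		if (likely(!(desc->status & IRQ_MOVE_PENDING)))
			return;

		desc->status &= ~IRQ_MOVE_PENDING;
		if (likely(cpumask_intersects(desc->pending_mask, cpu_online_mask)))
			desc->chip->set_affinity(irq, desc->pending_mask);
	}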
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index b0f011866969..accb40cdb12a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2490,13 +2490,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	if (unlikely(!debug_locks))
+	lock->class_cache = NULL;
+#ifdef CONFIG_LOCK_STAT
+	lock->cpu = raw_smp_processor_id();
+#endif
+
+	if (DEBUG_LOCKS_WARN_ON(!name)) {
+		lock->name = "NULL";
 		return;
+	}
+
+	lock->name = name;
 
 	if (DEBUG_LOCKS_WARN_ON(!key))
 		return;
-	if (DEBUG_LOCKS_WARN_ON(!name))
-		return;
 	/*
 	 * Sanity check, the lock-class key must be persistent:
 	 */
@@ -2505,12 +2512,11 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		DEBUG_LOCKS_WARN_ON(1);
 		return;
 	}
-	lock->name = name;
 	lock->key = key;
-	lock->class_cache = NULL;
-#ifdef CONFIG_LOCK_STAT
-	lock->cpu = raw_smp_processor_id();
-#endif
+
+	if (unlikely(!debug_locks))
+		return;
+
 	if (subclass)
 		register_lock_class(lock, subclass, 1);
 }
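
Note: the reorder means lockdep_init_map() now fills in class_cache, name and key before any early return, and the debug_locks bail-out moves to the end. A map initialized after lock debugging has been switched off (e.g. by the taint path in kernel/panic.c below) therefore still carries sane state instead of stale garbage. For context, a typical caller on a lockdep kernel, sketched from memory of lib/spinlock_debug.c (trimmed, may differ in detail):

	/*
	 * Sketch: spin_lock_init() funnels into lockdep_init_map(); the map
	 * must be fully initialized even when debug_locks is already off.
	 */
	void __spin_lock_init(spinlock_t *lock, const char *name,
			      struct lock_class_key *key)
	{
		lockdep_init_map(&lock->dep_map, name, key, 0);
		lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
		lock->magic = SPINLOCK_MAGIC;
		lock->owner = SPINLOCK_OWNER_INIT;
		lock->owner_cpu = -1;
	}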
diff --git a/kernel/panic.c b/kernel/panic.c
index 934fb377f4b3..3dcaa1661357 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -221,7 +221,7 @@ void add_taint(unsigned flag)
 	 * post-warning case.
 	 */
 	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
-		printk(KERN_WARNING "Disabling lockdep due to kernel taint\n");
+		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");
 
 	set_bit(flag, &tainted_mask);
 }
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 0854770b63b9..e71ca9cd81b2 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -646,13 +646,6 @@ static int software_resume(void)
 		return 0;
 
 	/*
-	 * We can't depend on SCSI devices being available after loading one of
-	 * their modules if scsi_complete_async_scans() is not called and the
-	 * resume device usually is a SCSI one.
-	 */
-	scsi_complete_async_scans();
-
-	/*
 	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
 	 * is configured into the kernel. Since the regular hibernate
 	 * trigger path is via sysfs which takes a buffer mutex before
@@ -663,32 +656,42 @@ static int software_resume(void)
 	 * here to avoid lockdep complaining.
 	 */
 	mutex_lock_nested(&pm_mutex, SINGLE_DEPTH_NESTING);
+
+	if (swsusp_resume_device)
+		goto Check_image;
+
+	if (!strlen(resume_file)) {
+		error = -ENOENT;
+		goto Unlock;
+	}
+
+	pr_debug("PM: Checking image partition %s\n", resume_file);
+
+	/* Check if the device is there */
+	swsusp_resume_device = name_to_dev_t(resume_file);
 	if (!swsusp_resume_device) {
-		if (!strlen(resume_file)) {
-			mutex_unlock(&pm_mutex);
-			return -ENOENT;
-		}
 		/*
 		 * Some device discovery might still be in progress; we need
 		 * to wait for this to finish.
 		 */
 		wait_for_device_probe();
+		/*
+		 * We can't depend on SCSI devices being available after loading
+		 * one of their modules until scsi_complete_async_scans() is
+		 * called and the resume device usually is a SCSI one.
+		 */
+		scsi_complete_async_scans();
+
 		swsusp_resume_device = name_to_dev_t(resume_file);
-		pr_debug("PM: Resume from partition %s\n", resume_file);
-	} else {
-		pr_debug("PM: Resume from partition %d:%d\n",
-			MAJOR(swsusp_resume_device),
-			MINOR(swsusp_resume_device));
-	}
+		if (!swsusp_resume_device) {
+			error = -ENODEV;
+			goto Unlock;
+		}
+	}
 
-	if (noresume) {
-		/**
-		 * FIXME: If noresume is specified, we need to find the
-		 * partition and reset it back to normal swap space.
-		 */
-		mutex_unlock(&pm_mutex);
-		return 0;
-	}
+ Check_image:
+	pr_debug("PM: Resume from partition %d:%d\n",
+		MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device));
 
 	pr_debug("PM: Checking hibernation image.\n");
 	error = swsusp_check();
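
Note: the rewritten flow replaces the scattered mutex_unlock()/return pairs with goto-based unwinding through a single exit path. The Unlock label sits outside this hunk; presumably the function ends with something along these lines (sketch, not shown in the diff):

	 Unlock:
		mutex_unlock(&pm_mutex);
		return error;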
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index dfcd83ceee3b..0692ab5a0d67 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -188,7 +188,7 @@ int ptrace_attach(struct task_struct *task)
 	/* Protect exec's credential calculations against our interference;
 	 * SUID, SGID and LSM creds get determined differently under ptrace.
 	 */
-	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
+	retval = mutex_lock_interruptible(&task->cred_exec_mutex);
 	if (retval < 0)
 		goto out;
 
@@ -232,7 +232,7 @@ repeat:
 bad:
 	write_unlock_irqrestore(&tasklist_lock, flags);
 	task_unlock(task);
-	mutex_unlock(&current->cred_exec_mutex);
+	mutex_unlock(&task->cred_exec_mutex);
 out:
 	return retval;
 }
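
Note: the fix takes the tracee's mutex rather than the tracer's. Exec serializes credential setup on its own task's cred_exec_mutex, so locking current's mutex in ptrace_attach() excluded nothing. The exec side presumably looks roughly like this (sketch from memory of fs/exec.c in this era, not part of the diff):

	/*
	 * Sketch of do_execve()'s side of the handshake: it locks its *own*
	 * cred_exec_mutex, so ptrace_attach() must lock task's, not
	 * current's, to actually serialize against the tracee's exec.
	 */
	retval = mutex_lock_interruptible(&current->cred_exec_mutex);
	if (retval < 0)
		goto out_ret;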
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index cf2bc01186ef..b28d19135f43 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -609,14 +609,14 @@ void slow_work_unregister_user(void)
 	if (slow_work_user_count == 0) {
 		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
 		slow_work_threads_should_exit = true;
+		del_timer_sync(&slow_work_cull_timer);
+		del_timer_sync(&slow_work_oom_timer);
 		wake_up_all(&slow_work_thread_wq);
 		wait_for_completion(&slow_work_last_thread_exited);
 		printk(KERN_NOTICE "Slow work thread pool:"
 		       " Shut down complete\n");
 	}
 
-	del_timer_sync(&slow_work_cull_timer);
-
 	mutex_unlock(&slow_work_user_lock);
 }
 EXPORT_SYMBOL(slow_work_unregister_user);
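
Note: the timers are now killed inside the last-user branch, before the pool threads are woken to exit, and the previously missed OOM timer is covered as well. Since del_timer_sync() guarantees the handler is no longer running on return, neither timer can wake or cull a thread mid-teardown. The ordering requirement, sketched with abbreviated names (hypothetical, for illustration only):

	should_exit = true;		/* 1: no new work accepted           */
	del_timer_sync(&cull_timer);	/* 2: handlers cannot still be       */
	del_timer_sync(&oom_timer);	/*    running or fire after this     */
	wake_up_all(&thread_wq);	/* 3: threads observe should_exit    */
	wait_for_completion(&last_thread_exited);	/* 4: drain         */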
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index c46c931a7fe7..ecfd7b5187e0 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -181,12 +181,12 @@ static void clocksource_watchdog(unsigned long data)
 
 	resumed = test_and_clear_bit(0, &watchdog_resumed);
 
-	wdnow = watchdog->read();
+	wdnow = watchdog->read(watchdog);
 	wd_nsec = cyc2ns(watchdog, (wdnow - watchdog_last) & watchdog->mask);
 	watchdog_last = wdnow;
 
 	list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
-		csnow = cs->read();
+		csnow = cs->read(cs);
 
 		if (unlikely(resumed)) {
 			cs->wd_last = csnow;
@@ -247,7 +247,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 
 	list_add(&cs->wd_list, &watchdog_list);
 	if (!started && watchdog) {
-		watchdog_last = watchdog->read();
+		watchdog_last = watchdog->read(watchdog);
 		watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
 		add_timer_on(&watchdog_timer,
 			     cpumask_first(cpu_online_mask));
@@ -268,7 +268,7 @@ static void clocksource_check_watchdog(struct clocksource *cs)
 			cse->flags &= ~CLOCK_SOURCE_WATCHDOG;
 		/* Start if list is not empty */
 		if (!list_empty(&watchdog_list)) {
-			watchdog_last = watchdog->read();
+			watchdog_last = watchdog->read(watchdog);
 			watchdog_timer.expires =
 				jiffies + WATCHDOG_INTERVAL;
 			add_timer_on(&watchdog_timer,
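
Note: passing the clocksource itself to its read() hook is what lets a driver recover per-instance state via container_of() instead of relying on a file-scope global. A hypothetical driver-side reader under the new signature (all foo_* names are made up for illustration):

	/*
	 * Hypothetical read callback under the new signature: container_of()
	 * maps the embedded clocksource back to the driver's own state.
	 */
	struct foo_timer {
		void __iomem		*regs;
		struct clocksource	cs;
	};

	static cycle_t foo_read(struct clocksource *cs)
	{
		struct foo_timer *ft = container_of(cs, struct foo_timer, cs);

		return (cycle_t)readl(ft->regs + 0x10);	/* counter register */
	}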
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 06f197560f3b..c3f6c30816e3 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -50,7 +50,7 @@
  */
 #define JIFFIES_SHIFT	8
 
-static cycle_t jiffies_read(void)
+static cycle_t jiffies_read(struct clocksource *cs)
 {
 	return (cycle_t) jiffies;
 }
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 900f1b6598d1..687dff49f6e7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -182,7 +182,7 @@ EXPORT_SYMBOL(do_settimeofday);
  */
 static void change_clocksource(void)
 {
-	struct clocksource *new;
+	struct clocksource *new, *old;
 
 	new = clocksource_get_next();
 
@@ -191,11 +191,16 @@ static void change_clocksource(void)
 
 	clocksource_forward_now();
 
-	new->raw_time = clock->raw_time;
+	if (clocksource_enable(new))
+		return;
 
+	new->raw_time = clock->raw_time;
+	old = clock;
 	clock = new;
+	clocksource_disable(old);
+
 	clock->cycle_last = 0;
-	clock->cycle_last = clocksource_read(new);
+	clock->cycle_last = clocksource_read(clock);
 	clock->error = 0;
 	clock->xtime_nsec = 0;
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -292,6 +297,7 @@ void __init timekeeping_init(void)
 	ntp_init();
 
 	clock = clocksource_get_next();
+	clocksource_enable(clock);
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
 	clock->cycle_last = clocksource_read(clock);
 
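
Note: change_clocksource() now refuses to switch if the new source cannot be enabled, and disables the old one only after the switch is complete; timekeeping_init() likewise enables the boot clocksource before its first read. The enable/disable helpers are presumably thin wrappers over optional per-clocksource hooks, along these lines (sketch; both hooks optional, so a source without them, like jiffies, enables trivially):

	static inline int clocksource_enable(struct clocksource *cs)
	{
		int ret = 0;

		if (cs->enable)
			ret = cs->enable(cs);
		return ret;
	}

	static inline void clocksource_disable(struct clocksource *cs)
	{
		if (cs->disable)
			cs->disable(cs);
	}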