author    Ingo Molnar <mingo@elte.hu>  2009-02-26 07:02:23 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-02-26 07:02:23 -0500
commit    8e818179eb9e8f9e44d8410dd2a25077d026a08e (patch)
tree      7d08afd30c95c04129c20693d974a18799caeb5a /kernel
parent    742bd95ba96e19b3f7196c3a0834ebc17c8ba006 (diff)
parent    ecc25fbd6b9e07b33895c61ddf84006b00f55d99 (diff)
Merge branch 'x86/core' into perfcounters/core
Conflicts:
	arch/x86/kernel/apic/apic.c
	arch/x86/kernel/irqinit_32.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile                  1
-rw-r--r--  kernel/cgroup.c                  2
-rw-r--r--  kernel/futex.c                  53
-rw-r--r--  kernel/kexec.c                   7
-rw-r--r--  kernel/posix-cpu-timers.c       60
-rw-r--r--  kernel/power/Makefile            2
-rw-r--r--  kernel/power/console.c           6
-rw-r--r--  kernel/power/disk.c             22
-rw-r--r--  kernel/power/main.c              8
-rw-r--r--  kernel/power/swap.c              5
-rw-r--r--  kernel/power/user.c              8
-rw-r--r--  kernel/printk.c                 15
-rw-r--r--  kernel/sched.c                  15
-rw-r--r--  kernel/trace/Kconfig            25
-rw-r--r--  kernel/trace/ftrace.c            6
-rw-r--r--  kernel/trace/trace_mmiotrace.c  14
-rw-r--r--  kernel/trace/trace_selftest.c   19
17 files changed, 189 insertions, 79 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 5537554ed808..9ef39e5b0211 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
 obj-$(CONFIG_PM) += power/
+obj-$(CONFIG_FREEZER) += power/
 obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
 obj-$(CONFIG_KEXEC) += kexec.o
 obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e14db9c089b9..9edb5c4b79b4 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1122,8 +1122,8 @@ static void cgroup_kill_sb(struct super_block *sb) {
 
 	mutex_unlock(&cgroup_mutex);
 
-	kfree(root);
 	kill_litter_super(sb);
+	kfree(root);
 }
 
 static struct file_system_type cgroup_fs_type = {
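The reordering matters because superblock teardown in kill_litter_super() can still reach the cgroup root through the superblock; freeing the root first is a use-after-free. A minimal userspace sketch of the pattern (the types and helper below are hypothetical, not kernel APIs):

/*
 * Userspace sketch of the ordering bug the cgroup.c hunk fixes
 * (illustrative only).
 */
#include <stdio.h>
#include <stdlib.h>

struct root  { int id; };
struct super { struct root *root; };

static void kill_super(struct super *sb)
{
	/* teardown may still dereference sb->root */
	printf("tearing down super for root %d\n", sb->root->id);
}

int main(void)
{
	struct super sb = { .root = malloc(sizeof(struct root)) };

	sb.root->id = 1;
	/* Wrong order: free(sb.root); kill_super(&sb); -> use-after-free */
	kill_super(&sb);	/* finish every user of the object first ... */
	free(sb.root);		/* ... then free it */
	return 0;
}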
diff --git a/kernel/futex.c b/kernel/futex.c
index f89d373a9c6d..438701adce23 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1165,6 +1165,7 @@ static int futex_wait(u32 __user *uaddr, int fshared,
 		      u32 val, ktime_t *abs_time, u32 bitset, int clockrt)
 {
 	struct task_struct *curr = current;
+	struct restart_block *restart;
 	DECLARE_WAITQUEUE(wait, curr);
 	struct futex_hash_bucket *hb;
 	struct futex_q q;
@@ -1216,11 +1217,13 @@ retry:
 
 		if (!ret)
 			goto retry;
-		return ret;
+		goto out;
 	}
 	ret = -EWOULDBLOCK;
-	if (uval != val)
-		goto out_unlock_put_key;
+	if (unlikely(uval != val)) {
+		queue_unlock(&q, hb);
+		goto out_put_key;
+	}
 
 	/* Only actually queue if *uaddr contained val. */
 	queue_me(&q, hb);
@@ -1284,38 +1287,38 @@ retry:
 	 */
 
 	/* If we were woken (and unqueued), we succeeded, whatever. */
+	ret = 0;
 	if (!unqueue_me(&q))
-		return 0;
+		goto out_put_key;
+	ret = -ETIMEDOUT;
 	if (rem)
-		return -ETIMEDOUT;
+		goto out_put_key;
 
 	/*
 	 * We expect signal_pending(current), but another thread may
 	 * have handled it for us already.
 	 */
+	ret = -ERESTARTSYS;
 	if (!abs_time)
-		return -ERESTARTSYS;
-	else {
-		struct restart_block *restart;
-		restart = &current_thread_info()->restart_block;
-		restart->fn = futex_wait_restart;
-		restart->futex.uaddr = (u32 *)uaddr;
-		restart->futex.val = val;
-		restart->futex.time = abs_time->tv64;
-		restart->futex.bitset = bitset;
-		restart->futex.flags = 0;
-
-		if (fshared)
-			restart->futex.flags |= FLAGS_SHARED;
-		if (clockrt)
-			restart->futex.flags |= FLAGS_CLOCKRT;
-		return -ERESTART_RESTARTBLOCK;
-	}
+		goto out_put_key;
 
-out_unlock_put_key:
-	queue_unlock(&q, hb);
-	put_futex_key(fshared, &q.key);
+	restart = &current_thread_info()->restart_block;
+	restart->fn = futex_wait_restart;
+	restart->futex.uaddr = (u32 *)uaddr;
+	restart->futex.val = val;
+	restart->futex.time = abs_time->tv64;
+	restart->futex.bitset = bitset;
+	restart->futex.flags = 0;
+
+	if (fshared)
+		restart->futex.flags |= FLAGS_SHARED;
+	if (clockrt)
+		restart->futex.flags |= FLAGS_CLOCKRT;
 
+	ret = -ERESTART_RESTARTBLOCK;
+
+out_put_key:
+	put_futex_key(fshared, &q.key);
 out:
 	return ret;
 }
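The point of the rewrite is that every exit now funnels through out_put_key/out, so put_futex_key() runs on each path instead of being skipped by early returns. A userspace sketch of that single-exit goto idiom (acquire_key/release_key and wait_op are hypothetical stand-ins):

/* Minimal sketch of goto-based single-exit error handling. */
#include <errno.h>
#include <stdio.h>

static int acquire_key(void) { return 0; }
static void release_key(void) { puts("key released"); }

static int wait_op(int timed_out, int signalled)
{
	int ret;

	if (acquire_key())
		return -EFAULT;		/* nothing to undo yet */

	ret = 0;
	if (!timed_out && !signalled)
		goto out_put_key;	/* woken: success */
	ret = -ETIMEDOUT;
	if (timed_out)
		goto out_put_key;
	ret = -EINTR;			/* signalled */

out_put_key:
	release_key();			/* every path drops the reference */
	return ret;
}

int main(void) { return wait_op(0, 0); }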
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 795e7b67a228..c7fd6692939d 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1465,6 +1465,11 @@ int kernel_kexec(void)
 		error = device_power_down(PMSG_FREEZE);
 		if (error)
 			goto Enable_irqs;
+
+		/* Suspend system devices */
+		error = sysdev_suspend(PMSG_FREEZE);
+		if (error)
+			goto Power_up_devices;
 	} else
 #endif
 	{
@@ -1477,6 +1482,8 @@ int kernel_kexec(void)
 
 #ifdef CONFIG_KEXEC_JUMP
 	if (kexec_image->preserve_context) {
+		sysdev_resume();
+ Power_up_devices:
 		device_power_up(PMSG_RESTORE);
  Enable_irqs:
 		local_irq_enable();
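Note that the new Power_up_devices: label lives inside the CONFIG_KEXEC_JUMP block, next to the only goto that targets it, so the unwind path only exists when the feature is compiled in. A toy sketch of that shape (CONFIG_FEATURE_JUMP and the helpers are invented):

/* Sketch of a goto target kept under the same #ifdef as the only
 * code that jumps to it. */
#include <stdio.h>

#define CONFIG_FEATURE_JUMP 1

static int power_down(void)      { return 0; }
static int sysdev_suspend_(void) { return -1; }	/* pretend it fails */

int main(void)
{
	if (power_down())
		goto enable_irqs;
#ifdef CONFIG_FEATURE_JUMP
	if (sysdev_suspend_())
		goto power_up_devices;
	/* ... jump/resume work would happen here ... */
power_up_devices:
#endif
	puts("powering devices back up");
enable_irqs:
	puts("re-enabling interrupts");
	return 0;
}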
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 2313a4cc14ea..e976e505648d 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -681,6 +681,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+				  struct task_struct *p,
+				  union cpu_time_count *cpu)
+{
+	struct task_cputime cputime;
+
+	thread_group_cputimer(p, &cputime);
+	switch (CPUCLOCK_WHICH(which_clock)) {
+	default:
+		return -EINVAL;
+	case CPUCLOCK_PROF:
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		break;
+	case CPUCLOCK_VIRT:
+		cpu->cpu = cputime.utime;
+		break;
+	case CPUCLOCK_SCHED:
+		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		break;
+	}
+	return 0;
+}
+
+/*
  * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
@@ -741,7 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &val);
 	} else {
-		cpu_clock_sample_group(timer->it_clock, p, &val);
+		cpu_timer_sample_group(timer->it_clock, p, &val);
 	}
 
 	if (old) {
@@ -889,7 +916,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		read_unlock(&tasklist_lock);
 		goto dead;
 	} else {
-		cpu_clock_sample_group(timer->it_clock, p, &now);
+		cpu_timer_sample_group(timer->it_clock, p, &now);
 		clear_dead = (unlikely(p->exit_state) &&
 			      thread_group_empty(p));
 	}
@@ -1244,7 +1271,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
-		cpu_clock_sample_group(timer->it_clock, p, &now);
+		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below. */
 	}
@@ -1409,33 +1436,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p,
-				  union cpu_time_count *cpu)
-{
-	struct task_cputime cputime;
-
-	thread_group_cputimer(p, &cputime);
-	switch (CPUCLOCK_WHICH(which_clock)) {
-	default:
-		return -EINVAL;
-	case CPUCLOCK_PROF:
-		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
-		break;
-	case CPUCLOCK_VIRT:
-		cpu->cpu = cputime.utime;
-		break;
-	case CPUCLOCK_SCHED:
-		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
-		break;
-	}
-	return 0;
-}
-
-/*
  * Set one of the process-wide special case CPU timers.
 * The tsk->sighand->siglock must be held by the caller.
 * The *newval argument is relative and we update it to be absolute, *oldval
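The hunks above move cpu_timer_sample_group() verbatim from below run_posix_cpu_timers() to above its new first caller, posix_cpu_timer_set(): a static C function must be defined (or at least declared) before its first use. A toy illustration of the two options (all names here are invented):

/* Why the function was moved: visibility before first use. */
#include <stdio.h>

static int sample_group(int which);	/* option 1: forward-declare */

static int set_timer(int which)
{
	return sample_group(which);	/* caller sits above the definition */
}

/* option 2 (what the patch does): define it above the caller instead */
static int sample_group(int which)
{
	switch (which) {
	default: return -1;
	case 0:  return 10;	/* e.g. profiling clock */
	case 1:  return 20;	/* e.g. virtual clock */
	}
}

int main(void)
{
	printf("%d\n", set_timer(0));
	return 0;
}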
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index d7a10167a25b..720ea4f781bd 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -3,7 +3,7 @@ ifeq ($(CONFIG_PM_DEBUG),y)
 EXTRA_CFLAGS	+=	-DDEBUG
 endif
 
-obj-y				:= main.o
+obj-$(CONFIG_PM)		+= main.o
 obj-$(CONFIG_PM_SLEEP)		+= console.o
 obj-$(CONFIG_FREEZER)		+= process.o
 obj-$(CONFIG_HIBERNATION)	+= swsusp.o disk.o snapshot.o swap.o user.o
diff --git a/kernel/power/console.c b/kernel/power/console.c
index b8628be2a465..a3961b205de7 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -78,6 +78,12 @@ void pm_restore_console(void)
 	}
 	set_console(orig_fgconsole);
 	release_console_sem();
+
+	if (vt_waitactive(orig_fgconsole)) {
+		pr_debug("Resume: Can't switch VCs.");
+		return;
+	}
+
 	kmsg_redirect = orig_kmsg;
 }
 #endif
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 432ee575c9ee..4a4a206b1979 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -227,6 +227,12 @@ static int create_image(int platform_mode)
227 "aborting hibernation\n"); 227 "aborting hibernation\n");
228 goto Enable_irqs; 228 goto Enable_irqs;
229 } 229 }
230 sysdev_suspend(PMSG_FREEZE);
231 if (error) {
232 printk(KERN_ERR "PM: Some devices failed to power down, "
233 "aborting hibernation\n");
234 goto Power_up_devices;
235 }
230 236
231 if (hibernation_test(TEST_CORE)) 237 if (hibernation_test(TEST_CORE))
232 goto Power_up; 238 goto Power_up;
@@ -242,9 +248,11 @@ static int create_image(int platform_mode)
 	if (!in_suspend)
 		platform_leave(platform_mode);
  Power_up:
+	sysdev_resume();
 	/* NOTE:  device_power_up() is just a resume() for devices
 	 * that suspended with irqs off ... no overall powerup.
 	 */
+ Power_up_devices:
 	device_power_up(in_suspend ?
 		(error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
  Enable_irqs:
@@ -335,6 +343,7 @@ static int resume_target_kernel(void)
335 "aborting resume\n"); 343 "aborting resume\n");
336 goto Enable_irqs; 344 goto Enable_irqs;
337 } 345 }
346 sysdev_suspend(PMSG_QUIESCE);
338 /* We'll ignore saved state, but this gets preempt count (etc) right */ 347 /* We'll ignore saved state, but this gets preempt count (etc) right */
339 save_processor_state(); 348 save_processor_state();
340 error = restore_highmem(); 349 error = restore_highmem();
@@ -357,6 +366,7 @@ static int resume_target_kernel(void)
 	swsusp_free();
 	restore_processor_state();
 	touch_softlockup_watchdog();
+	sysdev_resume();
 	device_power_up(PMSG_RECOVER);
  Enable_irqs:
 	local_irq_enable();
@@ -440,6 +450,7 @@ int hibernation_platform_enter(void)
 	local_irq_disable();
 	error = device_power_down(PMSG_HIBERNATE);
 	if (!error) {
+		sysdev_suspend(PMSG_HIBERNATE);
 		hibernation_ops->enter();
 		/* We should never get here */
 		while (1);
@@ -595,6 +606,12 @@ static int software_resume(void)
 	unsigned int flags;
 
 	/*
+	 * If the user said "noresume".. bail out early.
+	 */
+	if (noresume)
+		return 0;
+
+	/*
 	 * name_to_dev_t() below takes a sysfs buffer mutex when sysfs
 	 * is configured into the kernel. Since the regular hibernate
 	 * trigger path is via sysfs which takes a buffer mutex before
@@ -610,6 +627,11 @@ static int software_resume(void)
 		mutex_unlock(&pm_mutex);
 		return -ENOENT;
 	}
+	/*
+	 * Some device discovery might still be in progress; we need
+	 * to wait for this to finish.
+	 */
+	wait_for_device_probe();
 	swsusp_resume_device = name_to_dev_t(resume_file);
 	pr_debug("PM: Resume from partition %s\n", resume_file);
 } else {
diff --git a/kernel/power/main.c b/kernel/power/main.c
index b4d219016b6c..c9632f841f64 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -298,8 +298,12 @@ static int suspend_enter(suspend_state_t state)
 		goto Done;
 	}
 
-	if (!suspend_test(TEST_CORE))
-		error = suspend_ops->enter(state);
+	error = sysdev_suspend(PMSG_SUSPEND);
+	if (!error) {
+		if (!suspend_test(TEST_CORE))
+			error = suspend_ops->enter(state);
+		sysdev_resume();
+	}
 
 	device_power_up(PMSG_RESUME);
  Done:
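suspend_enter() now brackets the platform ->enter() call with sysdev_suspend()/sysdev_resume(), and the resume side runs whether or not the inner step succeeds. A userspace sketch of the pairing (all names are illustrative stand-ins):

/* Sketch of the suspend/resume bracketing suspend_enter() gains. */
#include <stdio.h>

static int  sysdev_suspend_(void) { puts("sysdevs suspended"); return 0; }
static void sysdev_resume_(void)  { puts("sysdevs resumed"); }
static int  platform_enter_(void) { return -1; }	/* pretend it fails */

int main(void)
{
	int error = sysdev_suspend_();

	if (!error) {
		error = platform_enter_();
		sysdev_resume_();	/* paired even when enter fails */
	}
	printf("result: %d\n", error);
	return 0;
}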
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 6da14358537c..505f319e489c 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -60,6 +60,7 @@ static struct block_device *resume_bdev;
 static int submit(int rw, pgoff_t page_off, struct page *page,
 		struct bio **bio_chain)
 {
+	const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
@@ -80,7 +81,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 	bio_get(bio);
 
 	if (bio_chain == NULL) {
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		submit_bio(bio_rw, bio);
 		wait_on_page_locked(page);
 		if (rw == READ)
 			bio_set_pages_dirty(bio);
@@ -90,7 +91,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 		get_page(page);	/* These pages are freed later */
 		bio->bi_private = *bio_chain;
 		*bio_chain = bio;
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		submit_bio(bio_rw, bio);
 	}
 	return 0;
 }
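The flag word is now built once into the const bio_rw and reused at both submit_bio() sites, with BIO_RW_SYNCIO | BIO_RW_UNPLUG standing in for the old BIO_RW_SYNC bit. A small sketch of composing such a word from bit positions (the enum values below are invented, not the kernel's BIO_RW_* indices):

/* Compose a request flag word once, then reuse it everywhere. */
#include <stdio.h>

enum { RW_READ = 0, RW_WRITE = 1 };	/* base operation */
enum { F_SYNCIO = 2, F_UNPLUG = 3 };	/* extra behavior bit positions */

int main(void)
{
	int rw = RW_WRITE;
	const int req_flags = rw | (1 << F_SYNCIO) | (1 << F_UNPLUG);

	/* both submission paths would use the same precomputed word */
	printf("flags = 0x%x\n", req_flags);
	printf("sync? %s\n", (req_flags & (1 << F_SYNCIO)) ? "yes" : "no");
	return 0;
}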
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 005b93d839ba..6c85359364f2 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -95,15 +95,15 @@ static int snapshot_open(struct inode *inode, struct file *filp)
 		data->swap = swsusp_resume_device ?
 			swap_type_of(swsusp_resume_device, 0, NULL) : -1;
 		data->mode = O_RDONLY;
-		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
+		error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
 		if (error)
-			pm_notifier_call_chain(PM_POST_RESTORE);
+			pm_notifier_call_chain(PM_POST_HIBERNATION);
 	} else {
 		data->swap = -1;
 		data->mode = O_WRONLY;
-		error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
+		error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
 		if (error)
-			pm_notifier_call_chain(PM_POST_HIBERNATION);
+			pm_notifier_call_chain(PM_POST_RESTORE);
 	}
 	if (error)
 		atomic_inc(&snapshot_device_available);
diff --git a/kernel/printk.c b/kernel/printk.c
index 69188f226a93..e3602d0755b0 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -73,7 +73,6 @@ EXPORT_SYMBOL(oops_in_progress);
  * driver system.
  */
 static DECLARE_MUTEX(console_sem);
-static DECLARE_MUTEX(secondary_console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
@@ -891,12 +890,14 @@ void suspend_console(void)
 	printk("Suspending console(s) (use no_console_suspend to debug)\n");
 	acquire_console_sem();
 	console_suspended = 1;
+	up(&console_sem);
 }
 
 void resume_console(void)
 {
 	if (!console_suspend_enabled)
 		return;
+	down(&console_sem);
 	console_suspended = 0;
 	release_console_sem();
 }
@@ -912,11 +913,9 @@ void resume_console(void)
 void acquire_console_sem(void)
 {
 	BUG_ON(in_interrupt());
-	if (console_suspended) {
-		down(&secondary_console_sem);
-		return;
-	}
 	down(&console_sem);
+	if (console_suspended)
+		return;
 	console_locked = 1;
 	console_may_schedule = 1;
 }
@@ -926,6 +925,10 @@ int try_acquire_console_sem(void)
 {
 	if (down_trylock(&console_sem))
 		return -1;
+	if (console_suspended) {
+		up(&console_sem);
+		return -1;
+	}
 	console_locked = 1;
 	console_may_schedule = 0;
 	return 0;
@@ -979,7 +982,7 @@ void release_console_sem(void)
 	unsigned wake_klogd = 0;
 
 	if (console_suspended) {
-		up(&secondary_console_sem);
+		up(&console_sem);
 		return;
 	}
 
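The patch retires secondary_console_sem entirely: while the console is suspended, acquire_console_sem() still takes console_sem but returns without marking the console locked, and release_console_sem() simply drops the semaphore. A rough userspace analogue with a pthread mutex (a sketch, not the kernel's semantics in full):

/* Single-lock suspended-console scheme, userspace sketch. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t console_sem = PTHREAD_MUTEX_INITIALIZER;
static int console_suspended;
static int console_locked;

static void acquire_console_sem_(void)
{
	pthread_mutex_lock(&console_sem);
	if (console_suspended)
		return;		/* hold the sem, but don't claim the console */
	console_locked = 1;
}

static void release_console_sem_(void)
{
	if (console_suspended) {
		pthread_mutex_unlock(&console_sem);
		return;
	}
	console_locked = 0;
	/* ... flush pending messages here ... */
	pthread_mutex_unlock(&console_sem);
}

int main(void)
{
	console_suspended = 1;	/* as suspend_console() would set */
	acquire_console_sem_();
	release_console_sem_();
	printf("console_locked = %d\n", console_locked);
	return 0;
}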
diff --git a/kernel/sched.c b/kernel/sched.c
index 83b68ff6df80..16b73635f28f 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7026,20 +7026,26 @@ static void free_rootdomain(struct root_domain *rd)
 
 static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 {
+	struct root_domain *old_rd = NULL;
 	unsigned long flags;
 
 	spin_lock_irqsave(&rq->lock, flags);
 
 	if (rq->rd) {
-		struct root_domain *old_rd = rq->rd;
+		old_rd = rq->rd;
 
 		if (cpumask_test_cpu(rq->cpu, old_rd->online))
 			set_rq_offline(rq);
 
 		cpumask_clear_cpu(rq->cpu, old_rd->span);
 
-		if (atomic_dec_and_test(&old_rd->refcount))
-			free_rootdomain(old_rd);
+		/*
+		 * If we don't want to free the old_rd yet then
+		 * set old_rd to NULL to skip the freeing later
+		 * in this function:
+		 */
+		if (!atomic_dec_and_test(&old_rd->refcount))
+			old_rd = NULL;
 	}
 
 	atomic_inc(&rd->refcount);
@@ -7050,6 +7056,9 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
 	set_rq_online(rq);
 
 	spin_unlock_irqrestore(&rq->lock, flags);
+
+	if (old_rd)
+		free_rootdomain(old_rd);
 }
 
 static int __init_refok init_rootdomain(struct root_domain *rd, bool bootmem)
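The restructuring exists to move free_rootdomain() out from under rq->lock: the old root domain is remembered, its refcount is dropped inside the lock, and the actual free happens only after spin_unlock_irqrestore(). A userspace sketch of the deferred-free pattern (the types and locking are stand-ins):

/* Defer the free until after the lock is dropped. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct domain { int refcount; };

static pthread_mutex_t rq_lock = PTHREAD_MUTEX_INITIALIZER;
static struct domain *current_rd;

static void attach(struct domain *rd)
{
	struct domain *old_rd = NULL;

	pthread_mutex_lock(&rq_lock);
	if (current_rd) {
		old_rd = current_rd;
		if (--old_rd->refcount != 0)
			old_rd = NULL;	/* still referenced: skip the free */
	}
	rd->refcount++;
	current_rd = rd;
	pthread_mutex_unlock(&rq_lock);

	if (old_rd)			/* free only outside the lock */
		free(old_rd);
}

int main(void)
{
	struct domain *a = calloc(1, sizeof(*a));
	struct domain *b = calloc(1, sizeof(*b));

	attach(a);
	attach(b);	/* drops the last reference on a, frees it */
	printf("attached domain refcount %d\n", current_rd->refcount);
	free(b);
	return 0;
}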
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e2a4ff6fc3a6..34e707e5ab87 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -52,6 +52,7 @@ config FUNCTION_TRACER
 	depends on HAVE_FUNCTION_TRACER
 	depends on DEBUG_KERNEL
 	select FRAME_POINTER
+	select KALLSYMS
 	select TRACING
 	select CONTEXT_SWITCH_TRACER
 	help
@@ -238,6 +239,7 @@ config STACK_TRACER
 	depends on DEBUG_KERNEL
 	select FUNCTION_TRACER
 	select STACKTRACE
+	select KALLSYMS
 	help
 	  This special tracer records the maximum stack footprint of the
 	  kernel and displays it in debugfs/tracing/stack_trace.
@@ -302,4 +304,27 @@ config FTRACE_STARTUP_TEST
 	  functioning properly. It will do tests on all the configured
 	  tracers of ftrace.
 
+config MMIOTRACE
+	bool "Memory mapped IO tracing"
+	depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI
+	select TRACING
+	help
+	  Mmiotrace traces Memory Mapped I/O access and is meant for
+	  debugging and reverse engineering. It is called from the ioremap
+	  implementation and works via page faults. Tracing is disabled by
+	  default and can be enabled at run-time.
+
+	  See Documentation/tracers/mmiotrace.txt.
+	  If you are not helping to develop drivers, say N.
+
+config MMIOTRACE_TEST
+	tristate "Test module for mmiotrace"
+	depends on MMIOTRACE && m
+	help
+	  This is a dumb module for testing mmiotrace. It is very dangerous
+	  as it will write garbage to IO memory starting at a given address.
+	  However, it should be safe to use on e.g. unused portion of VRAM.
+
+	  Say N, unless you absolutely know what you are doing.
+
 endmenu
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9a236ffe2aa4..fdf913dfc7e8 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2033,7 +2033,7 @@ free:
 static int start_graph_tracing(void)
 {
 	struct ftrace_ret_stack **ret_stack_list;
-	int ret;
+	int ret, cpu;
 
 	ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
 				sizeof(struct ftrace_ret_stack *),
@@ -2042,6 +2042,10 @@ static int start_graph_tracing(void)
 	if (!ret_stack_list)
 		return -ENOMEM;
 
+	/* The cpu_boot init_task->ret_stack will never be freed */
+	for_each_online_cpu(cpu)
+		ftrace_graph_init_task(idle_task(cpu));
+
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
 	} while (ret == -EAGAIN);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index fffcb069f1dc..80e503ef6136 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -9,6 +9,7 @@
 #include <linux/kernel.h>
 #include <linux/mmiotrace.h>
 #include <linux/pci.h>
+#include <asm/atomic.h>
 
 #include "trace.h"
 
@@ -19,6 +20,7 @@ struct header_iter {
 static struct trace_array *mmio_trace_array;
 static bool overrun_detected;
 static unsigned long prev_overruns;
+static atomic_t dropped_count;
 
 static void mmio_reset_data(struct trace_array *tr)
 {
@@ -121,11 +123,11 @@ static void mmio_close(struct trace_iterator *iter)
 
 static unsigned long count_overruns(struct trace_iterator *iter)
 {
-	unsigned long cnt = 0;
+	unsigned long cnt = atomic_xchg(&dropped_count, 0);
 	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
 
 	if (over > prev_overruns)
-		cnt = over - prev_overruns;
+		cnt += over - prev_overruns;
 	prev_overruns = over;
 	return cnt;
 }
@@ -310,8 +312,10 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_RW;
@@ -338,8 +342,10 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 
 	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
 					 &irq_flags);
-	if (!event)
+	if (!event) {
+		atomic_inc(&dropped_count);
 		return;
+	}
 	entry = ring_buffer_event_data(event);
 	tracing_generic_entry_update(&entry->ent, 0, preempt_count());
 	entry->ent.type = TRACE_MMIO_MAP;
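Dropped events are now tallied in an atomic counter and folded into count_overruns() with atomic_xchg(), which reads and zeroes the counter in one step so concurrent increments are never lost. A C11 analogue using stdatomic in place of the kernel's atomic_t:

/* Read-and-reset drop counter, userspace sketch. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong dropped_count;

static void record_event(int buffer_full)
{
	if (buffer_full)
		atomic_fetch_add(&dropped_count, 1);	/* lost-event bump */
}

static unsigned long count_drops(void)
{
	/* exchange with 0: fetch the count and clear it atomically,
	 * so a concurrent producer's increment is never overwritten */
	return atomic_exchange(&dropped_count, 0);
}

int main(void)
{
	record_event(1);
	record_event(1);
	printf("dropped: %lu\n", count_drops());	/* 2 */
	printf("dropped: %lu\n", count_drops());	/* 0: was reset */
	return 0;
}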
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 88c8eb70f54a..bc8e80a86bca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -23,10 +23,20 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 {
 	struct ring_buffer_event *event;
 	struct trace_entry *entry;
+	unsigned int loops = 0;
 
 	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
 		entry = ring_buffer_event_data(event);
 
+		/*
+		 * The ring buffer is of size trace_buf_size; if
+		 * we loop more than that, there's something wrong
+		 * with the ring buffer.
+		 */
+		if (loops++ > trace_buf_size) {
+			printk(KERN_CONT ".. bad ring buffer ");
+			goto failed;
+		}
 		if (!trace_valid_entry(entry)) {
 			printk(KERN_CONT ".. invalid entry %d ",
 				entry->type);
@@ -57,11 +67,20 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 
 	cnt = ring_buffer_entries(tr->buffer);
 
+	/*
+	 * The trace_test_buffer_cpu runs a while loop to consume all data.
+	 * If the calling tracer is broken, and is constantly filling
+	 * the buffer, this will run forever, and hard lock the box.
+	 * We disable the ring buffer while we do this test to prevent
+	 * a hard lock up.
+	 */
+	tracing_off();
 	for_each_possible_cpu(cpu) {
 		ret = trace_test_buffer_cpu(tr, cpu);
 		if (ret)
 			break;
 	}
+	tracing_on();
 	__raw_spin_unlock(&ftrace_max_lock);
 	local_irq_restore(flags);
 
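Both guards bound the selftest's consume loop: the per-cpu loop counter catches a buffer that yields more events than it can hold, and tracing_off()/tracing_on() keep a broken tracer from refilling the buffer mid-test. A toy bounded-drain sketch (BUF_SIZE and next_event() are invented stand-ins for the ring buffer):

/* Cap the drain loop at the buffer capacity; fail loudly past it. */
#include <stdio.h>

#define BUF_SIZE 8

static int next_event(void)
{
	return 1;	/* a broken producer that never runs dry */
}

static int drain_buffer(void)
{
	unsigned int loops = 0;

	while (next_event()) {
		/* more events than the buffer can hold: something is
		 * refilling it behind our back, so bail out */
		if (loops++ > BUF_SIZE) {
			fprintf(stderr, ".. bad ring buffer\n");
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	return drain_buffer() ? 1 : 0;
}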