author     Linus Torvalds <torvalds@linux-foundation.org>  2011-02-03 11:52:05 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-02-03 11:52:05 -0500
commit     eb487ab4d5af0caee81bfaaa5d87b55844f60145 (patch)
tree       5dc5470b73aeb1f4bfff9e77d52ff3387c18e7df
parent     0b0abeaf3d30cec03ac6497fe978b8f7edecc5ae (diff)
parent     542e72fc90f5ed9eecb574f80f70868c7f296093 (diff)
Merge branch 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'perf-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  perf: Fix reading in perf_event_read()
  watchdog: Don't change watchdog state on read of sysctl
  watchdog: Fix sysctl consistency
  watchdog: Fix broken nowatchdog logic
  perf: Fix Pentium4 raw event validation
  perf: Fix alloc_callchain_buffers()
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c  12
-rw-r--r--  kernel/perf_event.c                  10
-rw-r--r--  kernel/watchdog.c                    43
3 files changed, 34 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index e56b9bfbabd1..f7a0993c1e7c 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -682,7 +682,7 @@ static int p4_validate_raw_event(struct perf_event *event)
          * if an event is shared accross the logical threads
          * the user needs special permissions to be able to use it
          */
-        if (p4_event_bind_map[v].shared) {
+        if (p4_ht_active() && p4_event_bind_map[v].shared) {
                 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                         return -EACCES;
         }
@@ -727,7 +727,8 @@ static int p4_hw_config(struct perf_event *event)
                 event->hw.config = p4_set_ht_bit(event->hw.config);
 
         if (event->attr.type == PERF_TYPE_RAW) {
-
+                struct p4_event_bind *bind;
+                unsigned int esel;
                 /*
                  * Clear bits we reserve to be managed by kernel itself
                  * and never allowed from a user space
@@ -743,6 +744,13 @@ static int p4_hw_config(struct perf_event *event)
                  * bits since we keep additional info here (for cache events and etc)
                  */
                 event->hw.config |= event->attr.config;
+                bind = p4_config_get_bind(event->attr.config);
+                if (!bind) {
+                        rc = -EINVAL;
+                        goto out;
+                }
+                esel = P4_OPCODE_ESEL(bind->opcode);
+                event->hw.config |= p4_config_pack_cccr(P4_CCCR_ESEL(esel));
         }
 
         rc = x86_setup_perfctr(event);
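
The raw-event path now also rejects configs that do not map to any known
Pentium4 event, and it derives the hardware event-select from the bound
opcode instead of trusting whatever the user packed into the config. A
minimal sketch of that lookup, reusing the helpers named in the hunk
(their exact definitions live in the P4 perf headers and are assumed here):

        /* sketch, not the kernel code: resolve a raw config to its
         * event binding and force the CCCR event-select (ESEL) to
         * match the opcode the event was bound to */
        struct p4_event_bind *bind = p4_config_get_bind(event->attr.config);

        if (!bind)
                return -EINVAL;         /* unknown raw event */

        event->hw.config |=
                p4_config_pack_cccr(P4_CCCR_ESEL(P4_OPCODE_ESEL(bind->opcode)));

The p4_ht_active() test in the first hunk narrows the CAP_SYS_ADMIN check
to the only case where a shared event is actually visible to the sibling
thread, i.e. when HyperThreading is enabled.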
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 126a302c481c..999835b6112b 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1901,11 +1901,12 @@ static void __perf_event_read(void *info)
                 return;
 
         raw_spin_lock(&ctx->lock);
-        update_context_time(ctx);
+        if (ctx->is_active)
+                update_context_time(ctx);
         update_event_times(event);
+        if (event->state == PERF_EVENT_STATE_ACTIVE)
+                event->pmu->read(event);
         raw_spin_unlock(&ctx->lock);
-
-        event->pmu->read(event);
 }
 
 static inline u64 perf_event_count(struct perf_event *event)
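
__perf_event_read() runs in a cross-CPU function call, and by the time it
executes the target event may already have been scheduled out, so calling
pmu->read() after dropping ctx->lock could sample stale or foreign counter
state. The fix moves the read under the lock and gates it on the event
still being active. The underlying pattern, as a generic sketch (names
taken from the hunk, control flow simplified):

        /* sketch: a check-then-use must stay under the lock that
         * serializes the state transition, or the state can change
         * between the check and the use */
        raw_spin_lock(&ctx->lock);
        if (event->state == PERF_EVENT_STATE_ACTIVE)
                event->pmu->read(event);  /* cannot be unscheduled here */
        raw_spin_unlock(&ctx->lock);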
@@ -1999,8 +2000,7 @@ static int alloc_callchain_buffers(void)
          * accessed from NMI. Use a temporary manual per cpu allocation
          * until that gets sorted out.
          */
-        size = sizeof(*entries) + sizeof(struct perf_callchain_entry *) *
-                num_possible_cpus();
+        size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);
 
         entries = kzalloc(size, GFP_KERNEL);
         if (!entries)
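
num_possible_cpus() counts how many CPUs exist, but cpu_entries[] is
indexed by CPU id, and ids can be sparse: the largest valid index is
nr_cpu_ids - 1, which can exceed the possible-CPU count. Sizing the
allocation with offsetof over the flexible array's nr_cpu_ids-th element
guarantees every reachable index is in bounds. A self-contained userspace
sketch of the idiom (struct and names are illustrative, not the kernel's):

        #include <stddef.h>
        #include <stdlib.h>

        struct entries {
                int refcount;
                void *slot[];   /* flexible array, indexed by id */
        };

        static struct entries *alloc_entries(size_t nr_ids)
        {
                /* offsetof(..., slot[nr_ids]) = header + exactly nr_ids
                 * slots, so the highest valid index (nr_ids - 1) stays
                 * in bounds even when the id space has holes */
                return calloc(1, offsetof(struct entries, slot[nr_ids]));
        }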
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d7ebdf4cea98..f37f974aa81b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,7 +27,7 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
+int watchdog_enabled = 1;
 int __read_mostly softlockup_thresh = 60;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
@@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str)
         if (!strncmp(str, "panic", 5))
                 hardlockup_panic = 1;
         else if (!strncmp(str, "0", 1))
-                no_watchdog = 1;
+                watchdog_enabled = 0;
         return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-        no_watchdog = 1;
+        watchdog_enabled = 0;
         return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-        no_watchdog = 1;
+        watchdog_enabled = 0;
         return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -432,9 +429,6 @@ static int watchdog_enable(int cpu)
                 wake_up_process(p);
         }
 
-        /* if any cpu succeeds, watchdog is considered enabled for the system */
-        watchdog_enabled = 1;
-
         return 0;
 }
 
@@ -462,12 +456,16 @@ static void watchdog_disable(int cpu)
 static void watchdog_enable_all_cpus(void)
 {
         int cpu;
-        int result = 0;
+
+        watchdog_enabled = 0;
 
         for_each_online_cpu(cpu)
-                result += watchdog_enable(cpu);
+                if (!watchdog_enable(cpu))
+                        /* if any cpu succeeds, watchdog is considered
+                           enabled for the system */
+                        watchdog_enabled = 1;
 
-        if (result)
+        if (!watchdog_enabled)
                 printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
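
watchdog_enabled is now cleared before the loop and raised again only once
some CPU's watchdog actually starts, so the flag mirrors real state rather
than the requested state: if every CPU fails, it stays 0, the error is
logged, and a subsequent read of the sysctl reports the detector as off.
In sketch form, the aggregation is simply (simplified from the hunk above):

        /* sketch: enabled = OR over per-cpu successes */
        watchdog_enabled = 0;
        for_each_online_cpu(cpu)
                if (!watchdog_enable(cpu))   /* 0 means success */
                        watchdog_enabled = 1;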
@@ -476,9 +474,6 @@ static void watchdog_disable_all_cpus(void)
 {
         int cpu;
 
-        if (no_watchdog)
-                return;
-
         for_each_online_cpu(cpu)
                 watchdog_disable(cpu);
 
@@ -498,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write,
 {
         proc_dointvec(table, write, buffer, length, ppos);
 
-        if (watchdog_enabled)
-                watchdog_enable_all_cpus();
-        else
-                watchdog_disable_all_cpus();
+        if (write) {
+                if (watchdog_enabled)
+                        watchdog_enable_all_cpus();
+                else
+                        watchdog_disable_all_cpus();
+        }
         return 0;
 }
 
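
proc_dointvec() already copies the written value into watchdog_enabled, so
the handler only has to react to it; gating the reaction on write means a
plain read can no longer restart or stop the detector as a side effect.
Assuming the usual procname for this table entry, runtime usage looks like:

        sysctl kernel.watchdog          # query actual state, side-effect free
        sysctl -w kernel.watchdog=0     # stop the watchdog on all CPUs
        sysctl -w kernel.watchdog=1     # start it again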
@@ -530,7 +527,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 break;
         case CPU_ONLINE:
         case CPU_ONLINE_FROZEN:
-                err = watchdog_enable(hotcpu);
+                if (watchdog_enabled)
+                        err = watchdog_enable(hotcpu);
                 break;
 #ifdef CONFIG_HOTPLUG_CPU
         case CPU_UP_CANCELED:
@@ -555,9 +553,6 @@ void __init lockup_detector_init(void)
         void *cpu = (void *)(long)smp_processor_id();
         int err;
 
-        if (no_watchdog)
-                return;
-
         err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
         WARN_ON(notifier_to_errno(err));
 