author	Ingo Molnar <mingo@elte.hu>	2011-05-27 08:08:09 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-05-27 08:08:09 -0400
commit	b1d2dc3c06d8da7d58fb43d7123a91c1d6a4f576 (patch)
tree	425e774abcf88bad87ba4e25ccae7a9f49b56aab /arch
parent	75911c9bd1134f8c0b682aa1e8a8dbefec3ca07a (diff)
parent	b76a06e08d94b2a63e47837dfe46bbbf0a3af6c2 (diff)
Merge branch 'urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rric/oprofile into perf/urgent
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/oprofile/op_model_power4.c	24
-rw-r--r--	arch/x86/oprofile/op_model_amd.c	95
2 files changed, 77 insertions(+), 42 deletions(-)
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index 8ee51a252cf1..e6bec74be131 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra)
 	return is_kernel;
 }
 
+static bool pmc_overflow(unsigned long val)
+{
+	if ((int)val < 0)
+		return true;
+
+	/*
+	 * Events on POWER7 can roll back if a speculative event doesn't
+	 * eventually complete. Unfortunately in some rare cases they will
+	 * raise a performance monitor exception. We need to catch this to
+	 * ensure we reset the PMC. In all cases the PMC will be 256 or less
+	 * cycles from overflow.
+	 *
+	 * We only do this if the first pass fails to find any overflowing
+	 * PMCs because a user might set a period of less than 256 and we
+	 * don't want to mistakenly reset them.
+	 */
+	if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
+		return true;
+
+	return false;
+}
+
 static void power4_handle_interrupt(struct pt_regs *regs,
 			struct op_counter_config *ctr)
 {
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs,
 
 	for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) {
 		val = classic_ctr_read(i);
-		if (val < 0) {
+		if (pmc_overflow(val)) {
 			if (oprofile_running && ctr[i].enabled) {
 				oprofile_add_ext_sample(pc, regs, i, is_kernel);
 				classic_ctr_write(i, reset_value[i]);
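
The pmc_overflow() comment above describes the detection rule: a PMC has overflowed if its sign bit is set, and on POWER7 a value that rolled back to within 256 cycles of 0x80000000 must be treated the same way. A minimal user-space sketch of that test follows. It is a model, not the kernel code: __is_processor() is replaced by a hypothetical is_processor() over a fake PVR variable, and the PV_POWER7 value shown is only an assumed stand-in; the overflow arithmetic itself is taken verbatim from the patch.

#include <stdbool.h>
#include <stdio.h>

#define PV_POWER7 0x003f		/* assumed stand-in for the real PVR value */

static unsigned int pvr = PV_POWER7;	/* pretend we are running on a POWER7 */

/* user-space stand-in for the kernel's __is_processor() */
static bool is_processor(unsigned int pv)
{
	return pvr == pv;
}

static bool pmc_overflow(unsigned long val)
{
	/* sign bit set: the 32-bit counter wrapped past 0x80000000 */
	if ((int)val < 0)
		return true;

	/* POWER7 rollback: also accept values at most 256 cycles from overflow */
	if (is_processor(PV_POWER7) && ((0x80000000 - val) <= 256))
		return true;

	return false;
}

int main(void)
{
	printf("%d\n", pmc_overflow(0x80000001UL));	/* 1: overflowed */
	printf("%d\n", pmc_overflow(0x7fffff80UL));	/* 1: rolled back, within 256 */
	printf("%d\n", pmc_overflow(0x00000100UL));	/* 0: freshly reset counter */
	return 0;
}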
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index c3b8e24f2b16..9fd8a567fe1e 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -316,16 +316,23 @@ static void op_amd_stop_ibs(void)
 	wrmsrl(MSR_AMD64_IBSOPCTL, 0);
 }
 
-static inline int eilvt_is_available(int offset)
+static inline int get_eilvt(int offset)
 {
-	/* check if we may assign a vector */
 	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
 }
 
+static inline int put_eilvt(int offset)
+{
+	return !setup_APIC_eilvt(offset, 0, 0, 1);
+}
+
 static inline int ibs_eilvt_valid(void)
 {
 	int offset;
 	u64 val;
+	int valid = 0;
+
+	preempt_disable();
 
 	rdmsrl(MSR_AMD64_IBSCTL, val);
 	offset = val & IBSCTL_LVT_OFFSET_MASK;
@@ -333,16 +340,20 @@ static inline int ibs_eilvt_valid(void)
 	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
 		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
 		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-		return 0;
+		goto out;
 	}
 
-	if (!eilvt_is_available(offset)) {
+	if (!get_eilvt(offset)) {
 		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
 		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
-		return 0;
+		goto out;
 	}
 
-	return 1;
+	valid = 1;
+out:
+	preempt_enable();
+
+	return valid;
 }
 
 static inline int get_ibs_offset(void)
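
Worth noting in the ibs_eilvt_valid() rework above is the single-exit shape: both failure paths now jump to out:, so the preempt_disable()/preempt_enable() pair always balances. Below is a small user-space sketch of that control-flow pattern only, with a pthread mutex standing in for the preemption toggle and an assumed LVT_OFFSET_VALID bit standing in for IBSCTL_LVT_OFFSET_VALID; it does not model the MSR access.

#include <pthread.h>
#include <stdio.h>

/* assumed stand-in for IBSCTL_LVT_OFFSET_VALID (the real bit may differ) */
#define LVT_OFFSET_VALID 0x100UL

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* mirrors the ibs_eilvt_valid() shape: one exit, so lock/unlock always pairs */
static int offset_valid(unsigned long ctl)
{
	int valid = 0;

	pthread_mutex_lock(&lock);	/* models preempt_disable() */

	if (!(ctl & LVT_OFFSET_VALID))
		goto out;		/* early failure still unlocks below */

	valid = 1;
out:
	pthread_mutex_unlock(&lock);	/* models preempt_enable(): every path runs it */
	return valid;
}

int main(void)
{
	printf("%d %d\n", offset_valid(0x100UL), offset_valid(0x0UL));	/* prints: 1 0 */
	return 0;
}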
@@ -600,67 +611,69 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
 
 static int force_ibs_eilvt_setup(void)
 {
-	int i;
+	int offset;
 	int ret;
 
-	/* find the next free available EILVT entry */
-	for (i = 1; i < 4; i++) {
-		if (!eilvt_is_available(i))
-			continue;
-		ret = setup_ibs_ctl(i);
-		if (ret)
-			return ret;
-		pr_err(FW_BUG "using offset %d for IBS interrupts\n", i);
-		return 0;
+	/*
+	 * find the next free available EILVT entry, skip offset 0,
+	 * pin search to this cpu
+	 */
+	preempt_disable();
+	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
+		if (get_eilvt(offset))
+			break;
 	}
+	preempt_enable();
 
-	printk(KERN_DEBUG "No EILVT entry available\n");
-
-	return -EBUSY;
-}
-
-static int __init_ibs_nmi(void)
-{
-	int ret;
-
-	if (ibs_eilvt_valid())
-		return 0;
+	if (offset == APIC_EILVT_NR_MAX) {
+		printk(KERN_DEBUG "No EILVT entry available\n");
+		return -EBUSY;
+	}
 
-	ret = force_ibs_eilvt_setup();
+	ret = setup_ibs_ctl(offset);
 	if (ret)
-		return ret;
+		goto out;
 
-	if (!ibs_eilvt_valid())
-		return -EFAULT;
+	if (!ibs_eilvt_valid()) {
+		ret = -EFAULT;
+		goto out;
+	}
 
+	pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
 	pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
 
 	return 0;
+out:
+	preempt_disable();
+	put_eilvt(offset);
+	preempt_enable();
+	return ret;
 }
 
 /*
  * check and reserve APIC extended interrupt LVT offset for IBS if
  * available
- *
- * init_ibs() preforms implicitly cpu-local operations, so pin this
- * thread to its current CPU
  */
 
 static void init_ibs(void)
 {
-	preempt_disable();
-
 	ibs_caps = get_ibs_caps();
+
 	if (!ibs_caps)
+		return;
+
+	if (ibs_eilvt_valid())
 		goto out;
 
-	if (__init_ibs_nmi() < 0)
-		ibs_caps = 0;
-	else
-		printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
+	if (!force_ibs_eilvt_setup())
+		goto out;
+
+	/* Failed to setup ibs */
+	ibs_caps = 0;
+	return;
 
 out:
-	preempt_enable();
+	printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
 }
 
 static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
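
The reworked force_ibs_eilvt_setup() follows a reserve/attempt/unwind sequence: reserve the first free EILVT entry, try to program it via setup_ibs_ctl(), and release the reservation again if programming fails. The user-space model below shows that flow with toy stand-ins over an in-memory slot table; the entry count, the slot table, and the injected one-time failure are all assumptions for illustration, and the kernel additionally brackets the cpu-local steps with preempt_disable()/preempt_enable(), which has no equivalent here.

#include <stdbool.h>
#include <stdio.h>

#define EILVT_NR_MAX 4			/* assumed count, standing in for APIC_EILVT_NR_MAX */

static bool slot_used[EILVT_NR_MAX];	/* toy model of the per-cpu EILVT entries */

static bool get_eilvt(int offset)	/* reserve: fails if already in use */
{
	if (slot_used[offset])
		return false;
	slot_used[offset] = true;
	return true;
}

static void put_eilvt(int offset)	/* release a reservation */
{
	slot_used[offset] = false;
}

static int setup_ibs_ctl(int offset)	/* stand-in: fail the first attempt */
{
	static int calls;
	(void)offset;			/* unused in this toy model */
	return (calls++ == 0) ? -1 : 0;
}

static int force_eilvt_setup(void)
{
	int offset;
	int ret;

	/* find and reserve the first free entry; offset 0 is never used */
	for (offset = 1; offset < EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	if (offset == EILVT_NR_MAX)
		return -1;		/* nothing free: like the -EBUSY path */

	ret = setup_ibs_ctl(offset);
	if (ret) {
		put_eilvt(offset);	/* unwind: don't leak the reservation */
		return ret;
	}
	return offset;
}

int main(void)
{
	slot_used[1] = true;		/* pretend the BIOS already took offset 1 */
	printf("got offset %d\n", force_eilvt_setup());	/* -1: setup fails, entry released */
	printf("got offset %d\n", force_eilvt_setup());	/* 2: same entry, proving the unwind */
	return 0;
}

The second call succeeding on the same offset is the point of put_eilvt(): without the unwind, the failed first attempt would have left the entry permanently reserved.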