author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-09-06 09:51:45 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:31 -0400
commit		15ac9a395a753cb28c674e7ea80386ffdff21785 (patch)
tree		63879e3031a6ed8e372ffd254ef97ff703a4d478 /arch
parent		a4eaf7f14675cb512d69f0c928055e73d0c6d252 (diff)
perf: Remove the sysfs bits
Neither the overcommit nor the reservation sysfs parameter was actually working; remove them, as they'll only get in the way.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
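The shape of the change is the same in every file below: call sites that compared against the removed global perf_max_events now consult the PMU's own capacity (alpha_pmu->num_pmcs on Alpha, armpmu->num_events on ARM, the MAX_HWEVENTS constant on sparc). A minimal C sketch of that per-PMU pattern; struct example_pmu and example_check_index are hypothetical names for illustration, not kernel API:

#include <errno.h>

/* Hypothetical stand-in for a per-PMU descriptor such as alpha_pmu
 * or armpmu; each PMU carries its own counter count. */
struct example_pmu {
	int num_events;		/* hardware counters this PMU provides */
};

/* After the patch, bounds checks use the PMU's own capacity rather
 * than one global perf_max_events shared by all PMUs. */
static int example_check_index(const struct example_pmu *pmu, int idx)
{
	return (idx >= 0 && idx < pmu->num_events) ? 0 : -EINVAL;
}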
Diffstat (limited to 'arch')
-rw-r--r--	arch/alpha/kernel/perf_event.c	3
-rw-r--r--	arch/arm/kernel/perf_event.c	9
-rw-r--r--	arch/sparc/kernel/perf_event.c	9
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	1
4 files changed, 5 insertions(+), 17 deletions(-)
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 380ef02d557a..9bb8c024080c 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -808,7 +808,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -879,7 +879,6 @@ void __init init_hw_perf_events(void)
 
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
 
 	perf_pmu_register(&pmu);
 }
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 448cfa6b3ef0..45d6a35217c1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -534,7 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
 	event->destroy = hw_perf_event_destroy;
 
 	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > perf_max_events) {
+		if (atomic_read(&active_events) > armpmu->num_events) {
 			atomic_dec(&active_events);
 			return -ENOSPC;
 		}
@@ -2974,14 +2974,12 @@ init_hw_perf_events(void)
 			armpmu = &armv6pmu;
 			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
 			       sizeof(armv6_perf_cache_map));
-			perf_max_events = armv6pmu.num_events;
 			break;
 		case 0xB020:	/* ARM11mpcore */
 			armpmu = &armv6mpcore_pmu;
 			memcpy(armpmu_perf_cache_map,
 			       armv6mpcore_perf_cache_map,
 			       sizeof(armv6mpcore_perf_cache_map));
-			perf_max_events = armv6mpcore_pmu.num_events;
 			break;
 		case 0xC080:	/* Cortex-A8 */
 			armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2993,7 +2991,6 @@ init_hw_perf_events(void)
 			/* Reset PMNC and read the nb of CNTx counters
 			    supported */
 			armv7pmu.num_events = armv7_reset_read_pmnc();
-			perf_max_events = armv7pmu.num_events;
 			break;
 		case 0xC090:	/* Cortex-A9 */
 			armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -3005,7 +3002,6 @@ init_hw_perf_events(void)
 			/* Reset PMNC and read the nb of CNTx counters
 			    supported */
 			armv7pmu.num_events = armv7_reset_read_pmnc();
-			perf_max_events = armv7pmu.num_events;
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -3016,13 +3012,11 @@ init_hw_perf_events(void)
 			armpmu = &xscale1pmu;
 			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
 			       sizeof(xscale_perf_cache_map));
-			perf_max_events = xscale1pmu.num_events;
 			break;
 		case 2:
 			armpmu = &xscale2pmu;
 			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
 			       sizeof(xscale_perf_cache_map));
-			perf_max_events = xscale2pmu.num_events;
 			break;
 		}
 	}
@@ -3032,7 +3026,6 @@ init_hw_perf_events(void)
 			arm_pmu_names[armpmu->id], armpmu->num_events);
 	} else {
 		pr_info("no hardware support available\n");
-		perf_max_events = -1;
 	}
 
 	perf_pmu_register(&pmu);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 516be2314b54..f9a706759364 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -897,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
 	if (!n_ev)
 		return 0;
 
-	if (n_ev > perf_max_events)
+	if (n_ev > MAX_HWEVENTS)
 		return -1;
 
 	msk0 = perf_event_get_msk(events[0]);
@@ -1014,7 +1014,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
-	if (n0 >= perf_max_events)
+	if (n0 >= MAX_HWEVENTS)
 		goto out;
 
 	cpuc->event[n0] = event;
@@ -1097,7 +1097,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
-				   perf_max_events - 1,
+				   MAX_HWEVENTS - 1,
 				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;
@@ -1309,9 +1309,6 @@ void __init init_hw_perf_events(void)
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	/* All sparc64 PMUs currently have 2 events. */
-	perf_max_events = 2;
-
 	perf_pmu_register(&pmu);
 	register_die_notifier(&perf_event_nmi_notifier);
 }
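Worth a gloss on the sparc_pmu_event_init() hunk above: group collection is capped at MAX_HWEVENTS - 1 so that one hardware slot stays free for the event being initialized itself. A trivial standalone check of that arithmetic, assuming MAX_HWEVENTS is 2 as the removed init-time comment states:

#include <assert.h>

#define MAX_HWEVENTS 2	/* assumed from the removed "2 events" comment */

int main(void)
{
	/* collect_events() may gather at most MAX_HWEVENTS - 1 existing
	 * group members, leaving one counter for the new event. */
	int max_peers = MAX_HWEVENTS - 1;

	assert(max_peers + 1 == MAX_HWEVENTS);
	return 0;
}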
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index dd6fec710677..0fb17050360f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1396,7 +1396,6 @@ void __init init_hw_perf_events(void)
 		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 	}
 	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-	perf_max_events = x86_pmu.num_counters;
 
 	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
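For context on the retained intel_ctrl line: it builds a dense enable mask with one bit per generic counter, so only the perf_max_events assignment disappears. A quick standalone check of that bitmask arithmetic (the counter count of 4 is an example value, not taken from the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int num_counters = 4;	/* example value only */
	uint64_t intel_ctrl = (1ULL << num_counters) - 1;

	/* Bits 0..3 set: 0xf enables generic counters 0-3. */
	assert(intel_ctrl == 0xf);
	return 0;
}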