-rw-r--r--  arch/alpha/kernel/perf_event.c      3
-rw-r--r--  arch/arm/kernel/perf_event.c        9
-rw-r--r--  arch/sparc/kernel/perf_event.c      9
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c    1
-rw-r--r--  include/linux/perf_event.h          6
-rw-r--r--  kernel/perf_event.c               124
6 files changed, 5 insertions, 147 deletions
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index 380ef02d557a..9bb8c024080c 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -808,7 +808,7 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
 	wrperfmon(PERFMON_CMD_DISABLE, cpuc->idx_mask);
 
 	/* la_ptr is the counter that overflowed. */
-	if (unlikely(la_ptr >= perf_max_events)) {
+	if (unlikely(la_ptr >= alpha_pmu->num_pmcs)) {
 		/* This should never occur! */
 		irq_err_count++;
 		pr_warning("PMI: silly index %ld\n", la_ptr);
@@ -879,7 +879,6 @@ void __init init_hw_perf_events(void)
 
 	/* And set up PMU specification */
 	alpha_pmu = &ev67_pmu;
-	perf_max_events = alpha_pmu->num_pmcs;
 
 	perf_pmu_register(&pmu);
 }
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 448cfa6b3ef0..45d6a35217c1 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -534,7 +534,7 @@ static int armpmu_event_init(struct perf_event *event)
 	event->destroy = hw_perf_event_destroy;
 
 	if (!atomic_inc_not_zero(&active_events)) {
-		if (atomic_read(&active_events) > perf_max_events) {
+		if (atomic_read(&active_events) > armpmu->num_events) {
 			atomic_dec(&active_events);
 			return -ENOSPC;
 		}
@@ -2974,14 +2974,12 @@ init_hw_perf_events(void)
 		armpmu = &armv6pmu;
 		memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
 		       sizeof(armv6_perf_cache_map));
-		perf_max_events = armv6pmu.num_events;
 		break;
 	case 0xB020:	/* ARM11mpcore */
 		armpmu = &armv6mpcore_pmu;
 		memcpy(armpmu_perf_cache_map,
 		       armv6mpcore_perf_cache_map,
 		       sizeof(armv6mpcore_perf_cache_map));
-		perf_max_events = armv6mpcore_pmu.num_events;
 		break;
 	case 0xC080:	/* Cortex-A8 */
 		armv7pmu.id = ARM_PERF_PMU_ID_CA8;
@@ -2993,7 +2991,6 @@ init_hw_perf_events(void)
 		/* Reset PMNC and read the nb of CNTx counters
 		    supported */
 		armv7pmu.num_events = armv7_reset_read_pmnc();
-		perf_max_events = armv7pmu.num_events;
 		break;
 	case 0xC090:	/* Cortex-A9 */
 		armv7pmu.id = ARM_PERF_PMU_ID_CA9;
@@ -3005,7 +3002,6 @@ init_hw_perf_events(void)
 		/* Reset PMNC and read the nb of CNTx counters
 		    supported */
 		armv7pmu.num_events = armv7_reset_read_pmnc();
-		perf_max_events = armv7pmu.num_events;
 		break;
 	}
 	/* Intel CPUs [xscale]. */
@@ -3016,13 +3012,11 @@ init_hw_perf_events(void)
 			armpmu = &xscale1pmu;
 			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
 			       sizeof(xscale_perf_cache_map));
-			perf_max_events = xscale1pmu.num_events;
 			break;
 		case 2:
 			armpmu = &xscale2pmu;
 			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
 			       sizeof(xscale_perf_cache_map));
-			perf_max_events = xscale2pmu.num_events;
 			break;
 		}
 	}
@@ -3032,7 +3026,6 @@ init_hw_perf_events(void)
 			arm_pmu_names[armpmu->id], armpmu->num_events);
 	} else {
 		pr_info("no hardware support available\n");
-		perf_max_events = -1;
 	}
 
 	perf_pmu_register(&pmu);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 516be2314b54..f9a706759364 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -897,7 +897,7 @@ static int sparc_check_constraints(struct perf_event **evts,
 	if (!n_ev)
 		return 0;
 
-	if (n_ev > perf_max_events)
+	if (n_ev > MAX_HWEVENTS)
 		return -1;
 
 	msk0 = perf_event_get_msk(events[0]);
@@ -1014,7 +1014,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags)
 	perf_pmu_disable(event->pmu);
 
 	n0 = cpuc->n_events;
-	if (n0 >= perf_max_events)
+	if (n0 >= MAX_HWEVENTS)
 		goto out;
 
 	cpuc->event[n0] = event;
@@ -1097,7 +1097,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
 	n = 0;
 	if (event->group_leader != event) {
 		n = collect_events(event->group_leader,
-				   perf_max_events - 1,
+				   MAX_HWEVENTS - 1,
 				   evts, events, current_idx_dmy);
 		if (n < 0)
 			return -EINVAL;
@@ -1309,9 +1309,6 @@ void __init init_hw_perf_events(void)
 
 	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
 
-	/* All sparc64 PMUs currently have 2 events. */
-	perf_max_events = 2;
-
 	perf_pmu_register(&pmu);
 	register_die_notifier(&perf_event_nmi_notifier);
 }
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index dd6fec710677..0fb17050360f 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1396,7 +1396,6 @@ void __init init_hw_perf_events(void)
 		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
 	}
 	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-	perf_max_events = x86_pmu.num_counters;
 
 	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
 		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 402073c61669..b22176d3ebdf 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -860,7 +860,6 @@ struct perf_cpu_context {
 	struct perf_event_context	ctx;
 	struct perf_event_context	*task_ctx;
 	int				active_oncpu;
-	int				max_pertask;
 	int				exclusive;
 	struct swevent_hlist		*swevent_hlist;
 	struct mutex			hlist_mutex;
@@ -883,11 +882,6 @@ struct perf_output_handle {
 
 #ifdef CONFIG_PERF_EVENTS
 
-/*
- * Set by architecture code:
- */
-extern int perf_max_events;
-
 extern int perf_pmu_register(struct pmu *pmu);
 extern void perf_pmu_unregister(struct pmu *pmu);
 
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 3bace4fd0355..8462e69409ae 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -39,10 +39,6 @@
  */
 static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context);
 
-int perf_max_events __read_mostly = 1;
-static int perf_reserved_percpu __read_mostly;
-static int perf_overcommit __read_mostly = 1;
-
 static atomic_t nr_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
@@ -66,11 +62,6 @@ int sysctl_perf_event_sample_rate __read_mostly = 100000;
 
 static atomic64_t perf_event_id;
 
-/*
- * Lock for (sysadmin-configurable) event reservations:
- */
-static DEFINE_SPINLOCK(perf_resource_lock);
-
 void __weak perf_event_print_debug(void)	{ }
 
 void perf_pmu_disable(struct pmu *pmu)
@@ -480,16 +471,6 @@ static void __perf_event_remove_from_context(void *info)
 
 	list_del_event(event, ctx);
 
-	if (!ctx->task) {
-		/*
-		 * Allow more per task events with respect to the
-		 * reservation:
-		 */
-		cpuctx->max_pertask =
-			min(perf_max_events - ctx->nr_events,
-			    perf_max_events - perf_reserved_percpu);
-	}
-
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -823,9 +804,6 @@ static void __perf_install_in_context(void *info)
 		}
 	}
 
-	if (!err && !ctx->task && cpuctx->max_pertask)
-		cpuctx->max_pertask--;
-
 unlock:
 	raw_spin_unlock(&ctx->lock);
 }
@@ -5930,10 +5908,6 @@ static void __cpuinit perf_event_init_cpu(int cpu)
 
 	cpuctx = &per_cpu(perf_cpu_context, cpu);
 
-	spin_lock(&perf_resource_lock);
-	cpuctx->max_pertask = perf_max_events - perf_reserved_percpu;
-	spin_unlock(&perf_resource_lock);
-
 	mutex_lock(&cpuctx->hlist_mutex);
 	if (cpuctx->hlist_refcount > 0) {
 		struct swevent_hlist *hlist;
@@ -6008,101 +5982,3 @@ void __init perf_event_init(void)
 	perf_tp_register();
 	perf_cpu_notifier(perf_cpu_notify);
 }
-
-static ssize_t perf_show_reserve_percpu(struct sysdev_class *class,
-					struct sysdev_class_attribute *attr,
-					char *buf)
-{
-	return sprintf(buf, "%d\n", perf_reserved_percpu);
-}
-
-static ssize_t
-perf_set_reserve_percpu(struct sysdev_class *class,
-			struct sysdev_class_attribute *attr,
-			const char *buf,
-			size_t count)
-{
-	struct perf_cpu_context *cpuctx;
-	unsigned long val;
-	int err, cpu, mpt;
-
-	err = strict_strtoul(buf, 10, &val);
-	if (err)
-		return err;
-	if (val > perf_max_events)
-		return -EINVAL;
-
-	spin_lock(&perf_resource_lock);
-	perf_reserved_percpu = val;
-	for_each_online_cpu(cpu) {
-		cpuctx = &per_cpu(perf_cpu_context, cpu);
-		raw_spin_lock_irq(&cpuctx->ctx.lock);
-		mpt = min(perf_max_events - cpuctx->ctx.nr_events,
-			  perf_max_events - perf_reserved_percpu);
-		cpuctx->max_pertask = mpt;
-		raw_spin_unlock_irq(&cpuctx->ctx.lock);
-	}
-	spin_unlock(&perf_resource_lock);
-
-	return count;
-}
-
-static ssize_t perf_show_overcommit(struct sysdev_class *class,
-				    struct sysdev_class_attribute *attr,
-				    char *buf)
-{
-	return sprintf(buf, "%d\n", perf_overcommit);
-}
-
-static ssize_t
-perf_set_overcommit(struct sysdev_class *class,
-		    struct sysdev_class_attribute *attr,
-		    const char *buf, size_t count)
-{
-	unsigned long val;
-	int err;
-
-	err = strict_strtoul(buf, 10, &val);
-	if (err)
-		return err;
-	if (val > 1)
-		return -EINVAL;
-
-	spin_lock(&perf_resource_lock);
-	perf_overcommit = val;
-	spin_unlock(&perf_resource_lock);
-
-	return count;
-}
-
-static SYSDEV_CLASS_ATTR(
-				reserve_percpu,
-				0644,
-				perf_show_reserve_percpu,
-				perf_set_reserve_percpu
-			);
-
-static SYSDEV_CLASS_ATTR(
-				overcommit,
-				0644,
-				perf_show_overcommit,
-				perf_set_overcommit
-			);
-
-static struct attribute *perfclass_attrs[] = {
-	&attr_reserve_percpu.attr,
-	&attr_overcommit.attr,
-	NULL
-};
-
-static struct attribute_group perfclass_attr_group = {
-	.attrs = perfclass_attrs,
-	.name = "perf_events",
-};
-
-static int __init perf_event_sysfs_init(void)
-{
-	return sysfs_create_group(&cpu_sysdev_class.kset.kobj,
-				  &perfclass_attr_group);
-}
-device_initcall(perf_event_sysfs_init);