path: root/arch/x86/kernel/cpu/perf_event_intel_ds.c
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-10-19 08:55:33 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-10-22 08:18:26 -0400
commit	96681fc3c9e7d1f89ab64e5eec40b6467c97680f (patch)
tree	2c161656d3f1641c4c2156652440960e3f866cf2 /arch/x86/kernel/cpu/perf_event_intel_ds.c
parent	f80c9e304b8e8062230b0cda2c2fdd586149c771 (diff)
perf, x86: Use NUMA aware allocations for PEBS/BTS/DS allocations
For performance reasons it's best to use node-local memory for the per-cpu buffers. This logic comes from a much larger patch proposed by Stephane.

Suggested-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Stephane Eranian <eranian@google.com>
LKML-Reference: <20101019134808.514465326@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
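In isolation, the pattern the patch switches to looks like the sketch below (illustrative only, not part of the commit; the helper name is made up). cpu_to_node() resolves the target CPU's home memory node, and kmalloc_node() with __GFP_ZERO replaces kzalloc(), keeping the zeroed allocation while placing it on that node:

/* Hypothetical helper, for illustration only -- not part of the patch. */
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/topology.h>

static void *alloc_cpu_local_buffer(int cpu, size_t size)
{
	int node = cpu_to_node(cpu);	/* home node of @cpu */

	/*
	 * kzalloc(size, GFP_KERNEL) leaves NUMA placement to wherever the
	 * allocating CPU happens to run; kmalloc_node() with __GFP_ZERO
	 * keeps the zeroing but pins the buffer to @cpu's local node.
	 */
	return kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, node);
}

As the hunks below show, each allocation site gains a local "int node = cpu_to_node(cpu);" so the node lookup happens once per call, and the callers' error handling is unchanged: a NULL return still maps to -ENOMEM.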
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel_ds.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 9
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 8a7f81cbd617..b7dcd9f2b8a0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -77,13 +77,14 @@ static void fini_debug_store_on_cpu(int cpu)
 static int alloc_pebs_buffer(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+	int node = cpu_to_node(cpu);
 	int max, thresh = 1; /* always use a single PEBS record */
 	void *buffer;
 
 	if (!x86_pmu.pebs)
 		return 0;
 
-	buffer = kzalloc(PEBS_BUFFER_SIZE, GFP_KERNEL);
+	buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
 	if (unlikely(!buffer))
 		return -ENOMEM;
 
@@ -114,13 +115,14 @@ static void release_pebs_buffer(int cpu)
 static int alloc_bts_buffer(int cpu)
 {
 	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
+	int node = cpu_to_node(cpu);
 	int max, thresh;
 	void *buffer;
 
 	if (!x86_pmu.bts)
 		return 0;
 
-	buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
+	buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
 	if (unlikely(!buffer))
 		return -ENOMEM;
 
@@ -150,9 +152,10 @@ static void release_bts_buffer(int cpu)
 
 static int alloc_ds_buffer(int cpu)
 {
+	int node = cpu_to_node(cpu);
 	struct debug_store *ds;
 
-	ds = kzalloc(sizeof(*ds), GFP_KERNEL);
+	ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
 	if (unlikely(!ds))
 		return -ENOMEM;
 