aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2010-05-17 12:48:00 -0400
committerIngo Molnar <mingo@elte.hu>2010-05-18 12:35:47 -0400
commita19d35c11fd559dd7dfd5a2078df7c9af74a5d88 (patch)
tree54cc4f3f5813eb7afd81d089d581a098df896f11 /kernel
parent00d1d0b095ba4e5c0958cb228b2a9c445d4a339d (diff)
perf: Optimize buffer placement by allocating buffers NUMA aware
Ensure cpu bound buffers live on the right NUMA node.

Suggested-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <1274114880.5605.5236.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/perf_event.c17
1 file changed, 15 insertions, 2 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 8d61d292f719..6ae62186dd0c 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2320,6 +2320,19 @@ perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
2320 return virt_to_page(data->data_pages[pgoff - 1]); 2320 return virt_to_page(data->data_pages[pgoff - 1]);
2321} 2321}
2322 2322
2323static void *perf_mmap_alloc_page(int cpu)
2324{
2325 struct page *page;
2326 int node;
2327
2328 node = (cpu == -1) ? cpu : cpu_to_node(cpu);
2329 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
2330 if (!page)
2331 return NULL;
2332
2333 return page_address(page);
2334}
2335
2323static struct perf_mmap_data * 2336static struct perf_mmap_data *
2324perf_mmap_data_alloc(struct perf_event *event, int nr_pages) 2337perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2325{ 2338{
@@ -2336,12 +2349,12 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
2336 if (!data) 2349 if (!data)
2337 goto fail; 2350 goto fail;
2338 2351
2339 data->user_page = (void *)get_zeroed_page(GFP_KERNEL); 2352 data->user_page = perf_mmap_alloc_page(event->cpu);
2340 if (!data->user_page) 2353 if (!data->user_page)
2341 goto fail_user_page; 2354 goto fail_user_page;
2342 2355
2343 for (i = 0; i < nr_pages; i++) { 2356 for (i = 0; i < nr_pages; i++) {
2344 data->data_pages[i] = (void *)get_zeroed_page(GFP_KERNEL); 2357 data->data_pages[i] = perf_mmap_alloc_page(event->cpu);
2345 if (!data->data_pages[i]) 2358 if (!data->data_pages[i])
2346 goto fail_data_pages; 2359 goto fail_data_pages;
2347 } 2360 }