author		Alexander Shishkin <alexander.shishkin@linux.intel.com>	2015-01-14 07:18:12 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-04-02 11:14:08 -0400
commit		0a4e38e64f5e91ce131cc42ee5bb3925377ec840 (patch)
tree		726e06adcb05c8e44e93ad072fdcba6be38450e3 /kernel/events/ring_buffer.c
parent		45bfb2e50471abbbfd83d40d28c986078b0d24ff (diff)
perf: Support high-order allocations for AUX space
Some PMUs (such as BTS, or Intel PT without the multiple-entry ToPA capability) don't support scatter-gather and prefer larger contiguous areas for their output regions. This patch adds a new PMU capability to request higher-order allocations.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kaixu Xia <kaixu.xia@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Robert Richter <rric@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: acme@infradead.org
Cc: adrian.hunter@intel.com
Cc: kan.liang@intel.com
Cc: markus.t.metzger@intel.com
Cc: mathieu.poirier@linaro.org
Link: http://lkml.kernel.org/r/1421237903-181015-4-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
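The patch advertises each high-order chunk to the driver through split_page() plus set_page_private(): the first page of a chunk is flagged with PagePrivate() and carries its allocation order. As a minimal, hypothetical sketch (the helper name aux_chunk_nr_pages is illustrative, not part of this patch), a PMU driver's setup_aux() callback could walk the page array and recover contiguous chunk sizes like this:

static unsigned long aux_chunk_nr_pages(struct page *page)
{
	/*
	 * High-order chunks are flagged with PagePrivate() and store
	 * their order in page_private(); plain order-0 pages carry no
	 * marking and span a single page.
	 */
	return PagePrivate(page) ? 1UL << page_private(page) : 1;
}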
Diffstat (limited to 'kernel/events/ring_buffer.c')
-rw-r--r--	kernel/events/ring_buffer.c	56
1 file changed, 50 insertions(+), 6 deletions(-)
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 3de9c4e9ea9f..ed0859e33b2f 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -243,30 +243,74 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 	spin_lock_init(&rb->event_lock);
 }
 
+#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)
+
+static struct page *rb_alloc_aux_page(int node, int order)
+{
+	struct page *page;
+
+	if (order > MAX_ORDER)
+		order = MAX_ORDER;
+
+	do {
+		page = alloc_pages_node(node, PERF_AUX_GFP, order);
+	} while (!page && order--);
+
+	if (page && order) {
+		/*
+		 * Communicate the allocation size to the driver
+		 */
+		split_page(page, order);
+		SetPagePrivate(page);
+		set_page_private(page, order);
+	}
+
+	return page;
+}
+
+static void rb_free_aux_page(struct ring_buffer *rb, int idx)
+{
+	struct page *page = virt_to_page(rb->aux_pages[idx]);
+
+	ClearPagePrivate(page);
+	page->mapping = NULL;
+	__free_page(page);
+}
+
 int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
 		 pgoff_t pgoff, int nr_pages, int flags)
 {
 	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
 	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
-	int ret = -ENOMEM;
+	int ret = -ENOMEM, max_order = 0;
 
 	if (!has_aux(event))
 		return -ENOTSUPP;
 
+	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG)
+		/*
+		 * We need to start with the max_order that fits in nr_pages,
+		 * not the other way around, hence ilog2() and not get_order.
+		 */
+		max_order = ilog2(nr_pages);
+
 	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
 	if (!rb->aux_pages)
 		return -ENOMEM;
 
 	rb->free_aux = event->pmu->free_aux;
-	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;
-	     rb->aux_nr_pages++) {
+	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
 		struct page *page;
+		int last, order;
 
-		page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
+		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
+		page = rb_alloc_aux_page(node, order);
 		if (!page)
 			goto out;
 
-		rb->aux_pages[rb->aux_nr_pages] = page_address(page);
+		for (last = rb->aux_nr_pages + (1 << page_private(page));
+		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
+			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
 	}
 
 	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
@@ -304,7 +348,7 @@ static void __rb_free_aux(struct ring_buffer *rb)
 	}
 
 	for (pg = 0; pg < rb->aux_nr_pages; pg++)
-		free_page((unsigned long)rb->aux_pages[pg]);
+		rb_free_aux_page(rb, pg);
 
 	kfree(rb->aux_pages);
 	rb->aux_nr_pages = 0;
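To see how the allocation loop in rb_alloc_aux() carves a buffer into descending-order chunks, here is a small userspace sketch of the same order-selection arithmetic (plain C with a stand-in ilog2(); the nr_pages value is a made-up example, not from the patch):

#include <stdio.h>

/* Stand-in for the kernel's ilog2(): floor(log2(n)) for n > 0. */
static int ilog2(unsigned int n)
{
	int l = -1;

	while (n) {
		n >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	int nr_pages = 10;		/* hypothetical AUX buffer size */
	int max_order = ilog2(nr_pages);/* as when PERF_PMU_CAP_AUX_NO_SG is set */
	int done = 0;

	/* Same order selection as the rb_alloc_aux() loop above. */
	while (done < nr_pages) {
		int order = ilog2(nr_pages - done);

		if (order > max_order)
			order = max_order;
		printf("allocate order-%d chunk (%d pages) at page %d\n",
		       order, 1 << order, done);
		done += 1 << order;
	}
	return 0;
}

For nr_pages = 10 this prints an order-3 chunk (8 pages) followed by an order-1 chunk (2 pages): each step takes the largest power-of-two chunk that still fits in the remaining space, which is why the patch uses ilog2() rather than get_order() (get_order() rounds up and could overshoot nr_pages).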