 include/linux/perf_event.h |  2 +-
 kernel/perf_event.c        | 41 ++++++++++++++++++++++---------------
 2 files changed, 27 insertions(+), 16 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 7bd17f0488f8..09cd9c1abfda 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -588,8 +588,8 @@ struct perf_mmap_data {
 	struct rcu_head			rcu_head;
 #ifdef CONFIG_PERF_USE_VMALLOC
 	struct work_struct		work;
+	int				page_order;	/* allocation order  */
 #endif
-	int				data_order;	/* allocation order  */
 	int				nr_pages;	/* nr of data pages  */
 	int				writable;	/* are we writable   */
 	int				nr_locked;	/* nr pages mlocked  */
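
Note: the header change above moves the allocation-order field inside the CONFIG_PERF_USE_VMALLOC block and renames it from data_order to page_order, so builds without vmalloc backing no longer carry a field that is always zero. A minimal userspace sketch of the layout effect (hypothetical struct names, not the kernel definitions):

    #include <stdio.h>

    /* Before the patch: every configuration carries the order field. */
    struct mmap_data_old {
        int data_order;    /* always 0 without vmalloc backing */
        int nr_pages;
    };

    /* After the patch: the field exists only with vmalloc backing. */
    struct mmap_data_new {
    #ifdef CONFIG_PERF_USE_VMALLOC
        int page_order;
    #endif
        int nr_pages;
    };

    int main(void)
    {
        printf("old: %zu bytes, new: %zu bytes\n",
               sizeof(struct mmap_data_old), sizeof(struct mmap_data_new));
        return 0;
    }
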
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index b67549a08626..953ce46d7b2f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -2297,11 +2297,6 @@ unlock:
 	rcu_read_unlock();
 }
 
-static unsigned long perf_data_size(struct perf_mmap_data *data)
-{
-	return data->nr_pages << (PAGE_SHIFT + data->data_order);
-}
-
 #ifndef CONFIG_PERF_USE_VMALLOC
 
 /*
@@ -2359,7 +2354,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
 			goto fail_data_pages;
 	}
 
-	data->data_order = 0;
 	data->nr_pages = nr_pages;
 
 	return data;
@@ -2395,6 +2389,11 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
 	kfree(data);
 }
 
+static inline int page_order(struct perf_mmap_data *data)
+{
+	return 0;
+}
+
 #else
 
 /*
@@ -2403,10 +2402,15 @@ static void perf_mmap_data_free(struct perf_mmap_data *data)
  * Required for architectures that have d-cache aliasing issues.
  */
 
+static inline int page_order(struct perf_mmap_data *data)
+{
+	return data->page_order;
+}
+
 static struct page *
 perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff)
 {
-	if (pgoff > (1UL << data->data_order))
+	if (pgoff > (1UL << page_order(data)))
 		return NULL;
 
 	return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE);
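
Note: defining page_order() once per configuration lets every caller use a single spelling; in the non-vmalloc case the helper is a compile-time constant 0, so expressions such as PAGE_SIZE << page_order(data) can fold back to plain PAGE_SIZE. A self-contained sketch of the pattern (the config macro, struct, and values here are illustrative stand-ins):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    struct mmap_data {
    #ifdef CONFIG_PERF_USE_VMALLOC
        int page_order;
    #endif
        int nr_pages;
    };

    #ifdef CONFIG_PERF_USE_VMALLOC
    /* One big allocation: the order is whatever was stored at alloc time. */
    static inline int page_order(struct mmap_data *data)
    {
        return data->page_order;
    }
    #else
    /* Per-page allocations: the order is a compile-time constant 0. */
    static inline int page_order(struct mmap_data *data)
    {
        return 0;
    }
    #endif

    int main(void)
    {
        struct mmap_data data = { .nr_pages = 8 };

        /* Folds to plain PAGE_SIZE when the constant-0 helper is compiled in. */
        printf("chunk size: %lu\n", PAGE_SIZE << page_order(&data));
        return 0;
    }
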
@@ -2426,7 +2430,7 @@ static void perf_mmap_data_free_work(struct work_struct *work)
 	int i, nr;
 
 	data = container_of(work, struct perf_mmap_data, work);
-	nr = 1 << data->data_order;
+	nr = 1 << page_order(data);
 
 	base = data->user_page;
 	for (i = 0; i < nr + 1; i++)
@@ -2465,7 +2469,7 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages)
 
 	data->user_page = all_buf;
 	data->data_pages[0] = all_buf + PAGE_SIZE;
-	data->data_order = ilog2(nr_pages);
+	data->page_order = ilog2(nr_pages);
 	data->nr_pages = 1;
 
 	return data;
@@ -2479,6 +2483,11 @@ fail:
 
 #endif
 
+static unsigned long perf_data_size(struct perf_mmap_data *data)
+{
+	return data->nr_pages << (PAGE_SHIFT + page_order(data));
+}
+
 static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct perf_event *event = vma->vm_file->private_data;
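
Note: with perf_data_size() moved below both configurations, the single expression nr_pages << (PAGE_SHIFT + page_order(data)) covers both layouts: per-page backing has order 0, while the vmalloc path sets nr_pages to 1 and page_order to ilog2() of the requested page count, yielding the same byte count. A quick arithmetic check (ilog2 below is a userspace stand-in for the kernel helper):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Stand-in for the kernel's ilog2() on an exact power of two. */
    static int ilog2(unsigned long n)
    {
        int order = 0;

        while (n > 1) {
            n >>= 1;
            order++;
        }
        return order;
    }

    int main(void)
    {
        unsigned long req_pages = 8;  /* pages requested at mmap time */

        /* Per-page backing: nr_pages = 8, page_order = 0. */
        unsigned long size_pages = req_pages << (PAGE_SHIFT + 0);

        /* vmalloc backing: nr_pages = 1, page_order = ilog2(8) = 3. */
        unsigned long size_vmalloc = 1UL << (PAGE_SHIFT + ilog2(req_pages));

        printf("%lu == %lu\n", size_pages, size_vmalloc);  /* 32768 == 32768 */
        return 0;
    }
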
@@ -2979,10 +2988,12 @@ void perf_output_copy(struct perf_output_handle *handle,
 		handle->addr += size;
 		handle->size -= size;
 		if (!handle->size) {
+			struct perf_mmap_data *data = handle->data;
+
 			handle->page++;
-			handle->page &= handle->data->nr_pages - 1;
-			handle->addr = handle->data->data_pages[handle->page];
-			handle->size = PAGE_SIZE << handle->data->data_order;
+			handle->page &= data->nr_pages - 1;
+			handle->addr = data->data_pages[handle->page];
+			handle->size = PAGE_SIZE << page_order(data);
 		}
 	} while (len);
 }
@@ -3050,12 +3061,12 @@ int perf_output_begin(struct perf_output_handle *handle,
 	if (head - local_read(&data->wakeup) > data->watermark)
 		local_add(data->watermark, &data->wakeup);
 
-	handle->page = handle->offset >> (PAGE_SHIFT + data->data_order);
+	handle->page = handle->offset >> (PAGE_SHIFT + page_order(data));
 	handle->page &= data->nr_pages - 1;
-	handle->size = handle->offset & ((PAGE_SIZE << data->data_order) - 1);
+	handle->size = handle->offset & ((PAGE_SIZE << page_order(data)) - 1);
 	handle->addr = data->data_pages[handle->page];
 	handle->addr += handle->size;
-	handle->size = (PAGE_SIZE << data->data_order) - handle->size;
+	handle->size = (PAGE_SIZE << page_order(data)) - handle->size;
 
 	if (have_lost) {
 		lost_event.header.type = PERF_RECORD_LOST;
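
Note: the perf_output_begin() math above works in chunks of PAGE_SIZE << page_order(data): handle->page indexes the chunk, and handle->size is computed first as the offset within the chunk and then as the room remaining in it. The & (nr_pages - 1) mask relies on nr_pages being a power of two. A worked example with assumed values (order 0, 16 pages, offset 0x5234; not taken from a real buffer):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
        int order = 0;                 /* page_order(data) */
        unsigned long nr_pages = 16;   /* must be a power of two */
        unsigned long offset = 0x5234; /* handle->offset */
        unsigned long chunk = PAGE_SIZE << order;

        unsigned long page = (offset >> (PAGE_SHIFT + order)) & (nr_pages - 1);
        unsigned long used = offset & (chunk - 1); /* bytes already used in chunk */
        unsigned long room = chunk - used;         /* bytes left in this chunk */

        /* Prints: page=5 used=0x234 room=0xdcc */
        printf("page=%lu used=%#lx room=%#lx\n", page, used, room);
        return 0;
    }
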