Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	66
1 file changed, 36 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index b0c7aa407943..731201bf4acc 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -997,15 +997,21 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 			     unsigned nr_pages)
 {
 	struct buffer_page *bpage, *tmp;
-	unsigned long addr;
 	LIST_HEAD(pages);
 	unsigned i;
 
 	WARN_ON(!nr_pages);
 
 	for (i = 0; i < nr_pages; i++) {
+		struct page *page;
+		/*
+		 * __GFP_NORETRY flag makes sure that the allocation fails
+		 * gracefully without invoking oom-killer and the system is
+		 * not destabilized.
+		 */
 		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
-				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
+				    GFP_KERNEL | __GFP_NORETRY,
+				    cpu_to_node(cpu_buffer->cpu));
 		if (!bpage)
 			goto free_pages;
 
@@ -1013,10 +1019,11 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
 		list_add(&bpage->list, &pages);
 
-		addr = __get_free_page(GFP_KERNEL);
-		if (!addr)
+		page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu),
+					GFP_KERNEL | __GFP_NORETRY, 0);
+		if (!page)
 			goto free_pages;
-		bpage->page = (void *)addr;
+		bpage->page = page_address(page);
 		rb_init_page(bpage->page);
 	}
 
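The two hunks above establish the pattern this commit applies throughout: allocate each buffer page from the memory node of the CPU that owns the per-cpu buffer, and pass __GFP_NORETRY so a failed allocation returns NULL instead of invoking the OOM killer. In isolation the pattern looks roughly like this (a minimal sketch, not code from this commit; my_alloc_buffer_page and my_free_buffer_page are hypothetical helpers):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

/*
 * Hypothetical helper showing the allocation pattern used above:
 * take one page from the memory node backing @cpu, and let the
 * allocation fail gracefully rather than trigger the OOM killer.
 */
static void *my_alloc_buffer_page(int cpu)
{
	struct page *page;

	page = alloc_pages_node(cpu_to_node(cpu),
				GFP_KERNEL | __GFP_NORETRY, 0);
	if (!page)
		return NULL;		/* caller unwinds and reports failure */

	return page_address(page);	/* kernel virtual address of the page */
}

/* Matching teardown for an address obtained via page_address(). */
static void my_free_buffer_page(void *addr)
{
	if (addr)
		free_page((unsigned long)addr);
}

The trailing 0 argument is the allocation order, i.e. a single page, just as __get_free_page() allocated; the difference is that alloc_pages_node() targets the node of the buffer's CPU rather than whichever node the allocating task happens to be running on.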
@@ -1045,7 +1052,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_page *bpage;
-	unsigned long addr;
+	struct page *page;
 	int ret;
 
 	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
@@ -1067,10 +1074,10 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	rb_check_bpage(cpu_buffer, bpage);
 
 	cpu_buffer->reader_page = bpage;
-	addr = __get_free_page(GFP_KERNEL);
-	if (!addr)
+	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
+	if (!page)
 		goto fail_free_reader;
-	bpage->page = (void *)addr;
+	bpage->page = page_address(page);
 	rb_init_page(bpage->page);
 
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
@@ -1314,7 +1321,6 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	unsigned nr_pages, rm_pages, new_pages;
 	struct buffer_page *bpage, *tmp;
 	unsigned long buffer_size;
-	unsigned long addr;
 	LIST_HEAD(pages);
 	int i, cpu;
 
@@ -1375,16 +1381,24 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 
 	for_each_buffer_cpu(buffer, cpu) {
 		for (i = 0; i < new_pages; i++) {
+			struct page *page;
+			/*
+			 * __GFP_NORETRY flag makes sure that the allocation
+			 * fails gracefully without invoking oom-killer and
+			 * the system is not destabilized.
+			 */
 			bpage = kzalloc_node(ALIGN(sizeof(*bpage),
						   cache_line_size()),
-					    GFP_KERNEL, cpu_to_node(cpu));
+					    GFP_KERNEL | __GFP_NORETRY,
+					    cpu_to_node(cpu));
			if (!bpage)
 				goto free_pages;
 			list_add(&bpage->list, &pages);
-			addr = __get_free_page(GFP_KERNEL);
-			if (!addr)
+			page = alloc_pages_node(cpu_to_node(cpu),
+						GFP_KERNEL | __GFP_NORETRY, 0);
+			if (!page)
 				goto free_pages;
-			bpage->page = (void *)addr;
+			bpage->page = page_address(page);
 			rb_init_page(bpage->page);
 		}
 	}
@@ -3730,16 +3744,17 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * Returns:
  *  The page allocated, or NULL on error.
  */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
 	struct buffer_data_page *bpage;
-	unsigned long addr;
+	struct page *page;
 
-	addr = __get_free_page(GFP_KERNEL);
-	if (!addr)
+	page = alloc_pages_node(cpu_to_node(cpu),
+				GFP_KERNEL | __GFP_NORETRY, 0);
+	if (!page)
 		return NULL;
 
-	bpage = (void *)addr;
+	bpage = page_address(page);
 
 	rb_init_page(bpage);
 
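ring_buffer_alloc_read_page() is an exported interface, so this hunk changes its signature: callers now name the CPU whose buffer the staging page will be used against, so the page can come from that CPU's node. A rough sketch of the resulting calling convention (read_one_page is illustrative, not from this diff):

#include <linux/ring_buffer.h>

/* Illustrative reader: pull one page of trace data from @cpu's
 * buffer using the new allocation signature. */
static int read_one_page(struct ring_buffer *buffer, int cpu)
{
	void *data;
	int ret;

	/* Staging page is now allocated near @cpu's buffer and may
	 * fail gracefully under memory pressure. */
	data = ring_buffer_alloc_read_page(buffer, cpu);
	if (!data)
		return -ENOMEM;

	/* Returns the read offset on success, or a negative error. */
	ret = ring_buffer_read_page(buffer, &data, PAGE_SIZE, cpu, 0);

	ring_buffer_free_read_page(buffer, data);
	return ret < 0 ? ret : 0;
}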
@@ -3978,20 +3993,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
 	unsigned long *p = filp->private_data;
-	char buf[64];
 	unsigned long val;
 	int ret;
 
-	if (cnt >= sizeof(buf))
-		return -EINVAL;
-
-	if (copy_from_user(&buf, ubuf, cnt))
-		return -EFAULT;
-
-	buf[cnt] = 0;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (ret < 0)
+	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+	if (ret)
 		return ret;
 
 	if (val)
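The last hunk is a separate cleanup: the open-coded copy_from_user()/strict_strtoul() sequence collapses into kstrtoul_from_user(), which bounds the copy and parses the integer in one call. The shape of such a write handler, sketched (my_simple_write and the surrounding names are hypothetical, not from this commit):

#include <linux/fs.h>
#include <linux/kernel.h>

/* Hypothetical debugfs-style write handler: parse a base-10
 * unsigned long directly from the user buffer. */
static ssize_t my_simple_write(struct file *filp, const char __user *ubuf,
			       size_t cnt, loff_t *ppos)
{
	unsigned long *p = filp->private_data;
	unsigned long val;
	int ret;

	/* Copies at most cnt bytes (capped internally), NUL-terminates,
	 * and parses in one step; returns 0 or a negative error code. */
	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*p = val;	/* apply the parsed value */
	(*ppos)++;

	return cnt;
}

Because kstrtoul_from_user() caps the number of bytes it copies from user space and NUL-terminates its internal buffer itself, the manual char buf[64] bound check, the -EINVAL path, and the explicit termination all become unnecessary.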