about summary refs log tree commit diff stats
path: root/kernel/trace/ring_buffer.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c  26
1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 95181e36891a..99d64cd58c52 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -437,7 +437,7 @@ struct ring_buffer_per_cpu {
437 raw_spinlock_t reader_lock; /* serialize readers */ 437 raw_spinlock_t reader_lock; /* serialize readers */
438 arch_spinlock_t lock; 438 arch_spinlock_t lock;
439 struct lock_class_key lock_key; 439 struct lock_class_key lock_key;
440 unsigned int nr_pages; 440 unsigned long nr_pages;
441 unsigned int current_context; 441 unsigned int current_context;
442 struct list_head *pages; 442 struct list_head *pages;
443 struct buffer_page *head_page; /* read from head */ 443 struct buffer_page *head_page; /* read from head */
@@ -458,7 +458,7 @@ struct ring_buffer_per_cpu {
458 u64 write_stamp; 458 u64 write_stamp;
459 u64 read_stamp; 459 u64 read_stamp;
460 /* ring buffer pages to update, > 0 to add, < 0 to remove */ 460 /* ring buffer pages to update, > 0 to add, < 0 to remove */
461 int nr_pages_to_update; 461 long nr_pages_to_update;
462 struct list_head new_pages; /* new pages to add */ 462 struct list_head new_pages; /* new pages to add */
463 struct work_struct update_pages_work; 463 struct work_struct update_pages_work;
464 struct completion update_done; 464 struct completion update_done;
@@ -1128,10 +1128,10 @@ static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1128 return 0; 1128 return 0;
1129} 1129}
1130 1130
1131static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu) 1131static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
1132{ 1132{
1133 int i;
1134 struct buffer_page *bpage, *tmp; 1133 struct buffer_page *bpage, *tmp;
1134 long i;
1135 1135
1136 for (i = 0; i < nr_pages; i++) { 1136 for (i = 0; i < nr_pages; i++) {
1137 struct page *page; 1137 struct page *page;
@@ -1168,7 +1168,7 @@ free_pages:
1168} 1168}
1169 1169
1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, 1170static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1171 unsigned nr_pages) 1171 unsigned long nr_pages)
1172{ 1172{
1173 LIST_HEAD(pages); 1173 LIST_HEAD(pages);
1174 1174
@@ -1193,7 +1193,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1193} 1193}
1194 1194
1195static struct ring_buffer_per_cpu * 1195static struct ring_buffer_per_cpu *
1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu) 1196rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
1197{ 1197{
1198 struct ring_buffer_per_cpu *cpu_buffer; 1198 struct ring_buffer_per_cpu *cpu_buffer;
1199 struct buffer_page *bpage; 1199 struct buffer_page *bpage;
@@ -1293,8 +1293,9 @@ struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1293 struct lock_class_key *key) 1293 struct lock_class_key *key)
1294{ 1294{
1295 struct ring_buffer *buffer; 1295 struct ring_buffer *buffer;
1296 long nr_pages;
1296 int bsize; 1297 int bsize;
1297 int cpu, nr_pages; 1298 int cpu;
1298 1299
1299 /* keep it in its own cache line */ 1300 /* keep it in its own cache line */
1300 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()), 1301 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
@@ -1420,12 +1421,12 @@ static inline unsigned long rb_page_write(struct buffer_page *bpage)
1420} 1421}
1421 1422
1422static int 1423static int
1423rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages) 1424rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages)
1424{ 1425{
1425 struct list_head *tail_page, *to_remove, *next_page; 1426 struct list_head *tail_page, *to_remove, *next_page;
1426 struct buffer_page *to_remove_page, *tmp_iter_page; 1427 struct buffer_page *to_remove_page, *tmp_iter_page;
1427 struct buffer_page *last_page, *first_page; 1428 struct buffer_page *last_page, *first_page;
1428 unsigned int nr_removed; 1429 unsigned long nr_removed;
1429 unsigned long head_bit; 1430 unsigned long head_bit;
1430 int page_entries; 1431 int page_entries;
1431 1432
@@ -1642,7 +1643,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1642 int cpu_id) 1643 int cpu_id)
1643{ 1644{
1644 struct ring_buffer_per_cpu *cpu_buffer; 1645 struct ring_buffer_per_cpu *cpu_buffer;
1645 unsigned nr_pages; 1646 unsigned long nr_pages;
1646 int cpu, err = 0; 1647 int cpu, err = 0;
1647 1648
1648 /* 1649 /*
@@ -4640,8 +4641,9 @@ static int rb_cpu_notify(struct notifier_block *self,
4640 struct ring_buffer *buffer = 4641 struct ring_buffer *buffer =
4641 container_of(self, struct ring_buffer, cpu_notify); 4642 container_of(self, struct ring_buffer, cpu_notify);
4642 long cpu = (long)hcpu; 4643 long cpu = (long)hcpu;
4643 int cpu_i, nr_pages_same; 4644 long nr_pages_same;
4644 unsigned int nr_pages; 4645 int cpu_i;
4646 unsigned long nr_pages;
4645 4647
4646 switch (action) { 4648 switch (action) {
4647 case CPU_UP_PREPARE: 4649 case CPU_UP_PREPARE: