Diffstat (limited to 'kernel/trace/ring_buffer.c')
 kernel/trace/ring_buffer.c | 73 ++++++++++++++++++++++++++++++++++++++++++-------------------------------
 1 file changed, 42 insertions(+), 31 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a1ca4956ab5e..8c1b2d290718 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
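
This hunk is part of the tree-wide locking rename in which the old raw_spinlock_t became arch_spinlock_t (freeing the raw_ prefix for the then-new raw_spinlock API); the behavior is unchanged. arch_spinlock_t is the lowest-level lock type and leaves interrupt state to the caller, which is why every converted use keeps the surrounding pattern:

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);
	/* ... manipulate reader/head pages ... */
	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

as visible in the rb_get_reader_page() hunks further down.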
@@ -464,6 +464,8 @@ struct ring_buffer_iter {
 	struct ring_buffer_per_cpu	*cpu_buffer;
 	unsigned long			head;
 	struct buffer_page		*head_page;
+	struct buffer_page		*cache_reader_page;
+	unsigned long			cache_read;
 	u64				read_stamp;
 };
 
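
The two new fields snapshot the reader page pointer and the per-cpu read count at iterator-reset time; rb_iter_peek() compares them against the live values to detect that a consuming read has swapped the reader page out from under the iterator (see the check added in rb_iter_peek() below).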
@@ -998,7 +1000,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1193,9 +1195,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1211,12 +1210,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
-
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1227,9 +1223,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1242,11 +1235,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
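
In both rb_remove_pages() and rb_insert_pages(), the local record_disabled/synchronize_sched() dance is dropped and rb_check_pages() moves inside the reader_lock critical section. Disabling writers is now done once by the caller, ring_buffer_resize(), and the page-list integrity check can no longer race with a reader swapping pages.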
@@ -1254,11 +1245,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- *  RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
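
The removed docbook paragraph documented a requirement that no longer holds: after this patch, ring_buffer_resize() quiesces writers itself instead of trusting the tracer to keep the buffer idle.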
@@ -1290,6 +1276,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
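
Assembled from this hunk and the ones below, the resulting entry/exit sequence of ring_buffer_resize() is:

	atomic_inc(&buffer->record_disabled);	/* new writers bail out */
	synchronize_sched();			/* wait out writers already in flight */
	mutex_lock(&buffer->mutex);
	get_online_cpus();
	/* ... grow or shrink the per-cpu page lists ... */
	put_online_cpus();
	mutex_unlock(&buffer->mutex);
	atomic_dec(&buffer->record_disabled);	/* recording resumes */

synchronize_sched() is sufficient here because ring-buffer writers run with preemption disabled, so one RCU-sched grace period guarantees that every writer which missed the record_disabled flag has finished.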
@@ -1352,6 +1343,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1361,6 +1354,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1370,6 +1364,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
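
All three exits (the success return above, the -ENOMEM path, and out_fail) now balance the atomic_inc() taken at the top of the function, so recording is re-enabled no matter how the resize ends.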
@@ -2723,6 +2718,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
 		iter->read_stamp = cpu_buffer->read_stamp;
 	else
 		iter->read_stamp = iter->head_page->page->time_stamp;
+	iter->cache_reader_page = cpu_buffer->reader_page;
+	iter->cache_read = cpu_buffer->read;
 }
 
 /**
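
rb_iter_reset() is where the snapshot is (re)taken: caching cpu_buffer->reader_page and cpu_buffer->read next to the existing read_stamp gives rb_iter_peek() a cheap staleness token for the iterator.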
@@ -2834,7 +2831,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2876,7 +2873,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
-	cpu_buffer->reader_page->list.next = reader->list.next;
+	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	/*
@@ -2913,7 +2910,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
-	reader->list.next->prev = &cpu_buffer->reader_page->list;
+	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
 	/* Finally update the reader page to the new head */
@@ -2923,7 +2920,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
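
Both fixes in rb_get_reader_page() wrap reader->list.next in rb_list_head() before using it. The lockless list walk stores the HEAD/UPDATE flags in the low bits of the ->next pointer, so the raw value is not necessarily a dereferenceable pointer. To the best of my reading, the masking helper defined earlier in this file is simply:

	#define RB_FLAG_MASK	3UL

	static struct list_head *rb_list_head(struct list_head *list)
	{
		unsigned long val = (unsigned long)list;

		return (struct list_head *)(val & ~RB_FLAG_MASK);
	}

Without the mask, splicing the reader page in and out could write through a pointer that still carries flag bits.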
@@ -3067,13 +3064,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	struct ring_buffer_event *event;
 	int nr_loops = 0;
 
-	if (ring_buffer_iter_empty(iter))
-		return NULL;
-
 	cpu_buffer = iter->cpu_buffer;
 	buffer = cpu_buffer->buffer;
 
+	/*
+	 * Check if someone performed a consuming read to
+	 * the buffer. A consuming read invalidates the iterator
+	 * and we need to reset the iterator in this case.
+	 */
+	if (unlikely(iter->cache_read != cpu_buffer->read ||
+		     iter->cache_reader_page != cpu_buffer->reader_page))
+		rb_iter_reset(iter);
+
  again:
+	if (ring_buffer_iter_empty(iter))
+		return NULL;
+
 	/*
 	 * We repeat when a timestamp is encountered.
 	 * We can get multiple timestamps by nested interrupts or also
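
The cached-token comparison is the entire invalidation scheme: any consuming read either advances cpu_buffer->read or swaps cpu_buffer->reader_page, so a mismatch on either proves the iterator is stale. A minimal userspace sketch of the idea (names hypothetical, not kernel API):

	#include <stdbool.h>

	struct buf  { void *reader_page; unsigned long read; };
	struct iter { const struct buf *b; void *cache_page; unsigned long cache_read; };

	static void iter_reset(struct iter *it)
	{
		it->cache_page = it->b->reader_page;
		it->cache_read = it->b->read;
	}

	static bool iter_stale(const struct iter *it)	/* re-check before each peek */
	{
		return it->cache_read != it->b->read ||
		       it->cache_page != it->b->reader_page;
	}

Moving the ring_buffer_iter_empty() test below the again: label matters as well: once rb_iter_reset() has run, the earlier emptiness answer may be wrong, so it must be re-evaluated on every pass.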
@@ -3088,6 +3094,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 	if (rb_per_cpu_empty(cpu_buffer))
 		return NULL;
 
+	if (iter->head >= local_read(&iter->head_page->page->commit)) {
+		rb_inc_iter(iter);
+		goto again;
+	}
+
 	event = rb_iter_head_event(iter);
 
 	switch (event->type_len) {
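
This guard covers an iterator positioned at the end of a fully-read page: when iter->head reaches the page's commit offset there is no event at that position, so the iterator advances to the next page and retries rather than handing rb_iter_head_event() a position past the last committed data.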
@@ -3286,9 +3297,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3408,11 +3419,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
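
The last two hunks (ring_buffer_read_start() and ring_buffer_reset_cpu()) are the mechanical tail of the raw_ to arch_ spinlock rename; the lock nesting, reader_lock outside with cpu_buffer->lock inside, is unchanged.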