Diffstat (limited to 'kernel/trace/ring_buffer.c')
 -rw-r--r--  kernel/trace/ring_buffer.c | 73 +++++++++++++++++++++++++++++++++----------------------------------------
 1 file changed, 33 insertions(+), 40 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 5dd017fea6f5..edefe3b2801b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -397,18 +397,21 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 	int ret;
 
 	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
-			       "offset:0;\tsize:%u;\n",
-			       (unsigned int)sizeof(field.time_stamp));
+			       "offset:0;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)sizeof(field.time_stamp),
+			       (unsigned int)is_signed_type(u64));
 
 	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), commit),
-			       (unsigned int)sizeof(field.commit));
+			       (unsigned int)sizeof(field.commit),
+			       (unsigned int)is_signed_type(long));
 
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
-			       "offset:%u;\tsize:%u;\n",
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
-			       (unsigned int)BUF_PAGE_SIZE);
+			       (unsigned int)BUF_PAGE_SIZE,
+			       (unsigned int)is_signed_type(char));
 
 	return ret;
 }
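
The hunk above teaches the page-header format description to report signedness, so userspace parsers no longer have to guess how to interpret each field. is_signed_type() is a compile-time check; a minimal sketch of the idea (the kernel's actual definition lives in a tracing header and may differ in detail):

	/* evaluates to 1 if `type` is signed, 0 if unsigned: -1 cast to an
	 * unsigned type wraps to the maximum value, which is not < 1 */
	#define is_signed_type(type)	(((type)(-1)) < (type)1)

Since the result is always 0 or 1, the cast to unsigned int for the %u conversion is safe.
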
@@ -420,7 +423,7 @@ struct ring_buffer_per_cpu {
 	int				cpu;
 	struct ring_buffer		*buffer;
 	spinlock_t			reader_lock;	/* serialize readers */
-	raw_spinlock_t			lock;
+	arch_spinlock_t			lock;
 	struct lock_class_key		lock_key;
 	struct list_head		*pages;
 	struct buffer_page		*head_page;	/* read from head */
@@ -995,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
 	cpu_buffer->buffer = buffer;
 	spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
-	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
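
These two hunks (and the matching call-site changes further down) track the tree-wide rename of the lowest-level lock primitives: the old raw_spinlock_t/__raw_spin_lock() at the architecture layer became arch_spinlock_t/arch_spin_lock(), freeing the raw_* namespace for a new class of raw spinlocks. Arch spinlocks are pure busy-wait locks with no lockdep coverage and no IRQ handling of their own, so callers must manage interrupt state themselves, as rb_get_reader_page() does below. A minimal usage sketch (demo_lock and demo() are illustrative, not from this file):

	static arch_spinlock_t demo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

	static void demo(void)
	{
		unsigned long flags;

		local_irq_save(flags);	/* arch locks never touch IRQ state */
		arch_spin_lock(&demo_lock);
		/* ... critical section ... */
		arch_spin_unlock(&demo_lock);
		local_irq_restore(flags);
	}
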
@@ -1190,9 +1193,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1208,12 +1208,9 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
-
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 static void
@@ -1224,9 +1221,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	struct list_head *p;
 	unsigned i;
 
-	atomic_inc(&cpu_buffer->record_disabled);
-	synchronize_sched();
-
 	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
@@ -1239,11 +1233,9 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 		list_add_tail(&bpage->list, cpu_buffer->pages);
 	}
 	rb_reset_cpu(cpu_buffer);
-	spin_unlock_irq(&cpu_buffer->reader_lock);
-
 	rb_check_pages(cpu_buffer);
 
-	atomic_dec(&cpu_buffer->record_disabled);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
 /**
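
In both helpers the per-buffer record disabling and the synchronize_sched() call are gone (they move up into ring_buffer_resize(), below), and rb_check_pages() now runs before reader_lock is released, so the list-integrity check can no longer race with a reader. Both functions end up with the same shape; condensed:

	spin_lock_irq(&cpu_buffer->reader_lock);
	rb_head_page_deactivate(cpu_buffer);
	/* ... unlink or append nr_pages buffer pages ... */
	rb_reset_cpu(cpu_buffer);
	rb_check_pages(cpu_buffer);	/* checked while readers are held off */
	spin_unlock_irq(&cpu_buffer->reader_lock);
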
@@ -1251,11 +1243,6 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
  * @buffer: the buffer to resize.
  * @size: the new size.
  *
- * The tracer is responsible for making sure that the buffer is
- * not being used while changing the size.
- * Note: We may be able to change the above requirement by using
- * RCU synchronizations.
- *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
  * Returns -1 on failure.
@@ -1287,6 +1274,11 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	if (size == buffer_size)
 		return size;
 
+	atomic_inc(&buffer->record_disabled);
+
+	/* Make sure all writers are done with this buffer. */
+	synchronize_sched();
+
 	mutex_lock(&buffer->mutex);
 	get_online_cpus();
 
@@ -1349,6 +1341,8 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
 
+	atomic_dec(&buffer->record_disabled);
+
 	return size;
 
  free_pages:
@@ -1358,6 +1352,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
 	}
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -ENOMEM;
 
 	/*
@@ -1367,6 +1362,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
  out_fail:
 	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
+	atomic_dec(&buffer->record_disabled);
 	return -1;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
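
ring_buffer_resize() now takes over the writer exclusion the helpers used to do per-CPU: recording is disabled once for the whole buffer, and a single synchronize_sched() grace period guarantees that every writer that missed the record_disabled increment has left its preempt-disabled write path before any pages are touched. The stale kerneldoc that made the caller responsible for this is deleted accordingly. The cost is that every exit, including the -ENOMEM and out_fail paths, must re-enable recording, which is what the three atomic_dec() additions are for. The ordering, condensed:

	atomic_inc(&buffer->record_disabled);
	synchronize_sched();	/* all in-flight writers have finished */

	mutex_lock(&buffer->mutex);
	/* ... rb_remove_pages()/rb_insert_pages() for each CPU ... */
	mutex_unlock(&buffer->mutex);

	atomic_dec(&buffer->record_disabled);	/* on every return path */
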
@@ -1787,9 +1783,9 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 static struct ring_buffer_event *
 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	     unsigned long length, unsigned long tail,
-	     struct buffer_page *commit_page,
 	     struct buffer_page *tail_page, u64 *ts)
 {
+	struct buffer_page *commit_page = cpu_buffer->commit_page;
 	struct ring_buffer *buffer = cpu_buffer->buffer;
 	struct buffer_page *next_page;
 	int ret;
@@ -1892,13 +1888,10 @@ static struct ring_buffer_event *
 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		  unsigned type, unsigned long length, u64 *ts)
 {
-	struct buffer_page *tail_page, *commit_page;
+	struct buffer_page *tail_page;
 	struct ring_buffer_event *event;
 	unsigned long tail, write;
 
-	commit_page = cpu_buffer->commit_page;
-	/* we just need to protect against interrupts */
-	barrier();
 	tail_page = cpu_buffer->tail_page;
 	write = local_add_return(length, &tail_page->write);
 
@@ -1909,7 +1902,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	/* See if we shot pass the end of this buffer page */
 	if (write > BUF_PAGE_SIZE)
 		return rb_move_tail(cpu_buffer, length, tail,
-				    commit_page, tail_page, ts);
+				    tail_page, ts);
 
 	/* We reserved something on the buffer */
 
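
These three hunks move the cpu_buffer->commit_page read out of the reserve fast path and into rb_move_tail(), its only consumer. That drops one function argument and, with it, the barrier() that was needed to order the commit_page read against the tail_page read in __rb_reserve_next(); the slow path can simply read the field where it is used. rb_move_tail() only compares the value against the page the writer is about to advance onto; the check it feeds, condensed from the function body (error handling around it omitted):

	next_page = tail_page;
	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If the tail has wrapped all the way around the buffer and
	 * caught up with the commit page, the buffer is full: bail out.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}
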
@@ -2834,7 +2827,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	int ret;
 
 	local_irq_save(flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
  again:
 	/*
@@ -2876,7 +2869,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * Splice the empty reader page into the list around the head.
 	 */
 	reader = rb_set_head_page(cpu_buffer);
-	cpu_buffer->reader_page->list.next = reader->list.next;
+	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 	cpu_buffer->reader_page->list.prev = reader->list.prev;
 
 	/*
@@ -2913,7 +2906,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 *
 	 * Now make the new head point back to the reader page.
 	 */
-	reader->list.next->prev = &cpu_buffer->reader_page->list;
+	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
 	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
 
 	/* Finally update the reader page to the new head */
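
Both reader-page splice sites now go through rb_list_head(). Under the lockless writer scheme, the low two bits of a buffer page's list.next pointer carry page-state flags (HEAD/UPDATE), so a raw next pointer must not be stored or dereferenced as-is while a writer may be flagging pages concurrently. rb_list_head() strips the flag bits; essentially (as defined near the top of this file):

	#define RB_PAGE_NORMAL	0UL
	#define RB_PAGE_HEAD	1UL
	#define RB_PAGE_UPDATE	2UL

	#define RB_FLAG_MASK	3UL

	static struct list_head *rb_list_head(struct list_head *list)
	{
		unsigned long val = (unsigned long)list;

		return (struct list_head *)(val & ~RB_FLAG_MASK);
	}

Without the masking, a HEAD flag set by a racing writer could be copied into the reader page's link and later dereferenced as part of the address.
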
@@ -2923,7 +2916,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 		goto again;
 
  out:
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	local_irq_restore(flags);
 
 	return reader;
@@ -3286,9 +3279,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
 	synchronize_sched();
 
 	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 	rb_iter_reset(iter);
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	return iter;
@@ -3408,11 +3401,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
 		goto out;
 
-	__raw_spin_lock(&cpu_buffer->lock);
+	arch_spin_lock(&cpu_buffer->lock);
 
 	rb_reset_cpu(cpu_buffer);
 
-	__raw_spin_unlock(&cpu_buffer->lock);
+	arch_spin_unlock(&cpu_buffer->lock);
 
  out:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
