-rw-r--r--  kernel/trace/ring_buffer.c | 265
-rw-r--r--  kernel/trace/trace.c       |  20
2 files changed, 209 insertions, 76 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2d5eb3320827..27ac37efb2b0 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -23,6 +23,8 @@
 #include <asm/local.h>
 #include "trace.h"
 
+static void update_pages_handler(struct work_struct *work);
+
 /*
  * The ring buffer header is special. We must manually up keep it.
  */
@@ -470,12 +472,15 @@ struct ring_buffer_per_cpu {
 	/* ring buffer pages to update, > 0 to add, < 0 to remove */
 	int				nr_pages_to_update;
 	struct list_head		new_pages; /* new pages to add */
+	struct work_struct		update_pages_work;
+	struct completion		update_completion;
 };
 
 struct ring_buffer {
 	unsigned			flags;
 	int				cpus;
 	atomic_t			record_disabled;
+	atomic_t			resize_disabled;
 	cpumask_var_t			cpumask;
 
 	struct lock_class_key		*reader_lock_key;
@@ -1048,6 +1053,8 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
 	raw_spin_lock_init(&cpu_buffer->reader_lock);
 	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
 	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
+	init_completion(&cpu_buffer->update_completion);
 
 	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
 			    GFP_KERNEL, cpu_to_node(cpu));
@@ -1235,32 +1242,123 @@ void ring_buffer_set_clock(struct ring_buffer *buffer,
 
 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
 
+static inline unsigned long rb_page_entries(struct buffer_page *bpage)
+{
+	return local_read(&bpage->entries) & RB_WRITE_MASK;
+}
+
+static inline unsigned long rb_page_write(struct buffer_page *bpage)
+{
+	return local_read(&bpage->write) & RB_WRITE_MASK;
+}
+
 static void
-rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
+rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
 {
-	struct buffer_page *bpage;
-	struct list_head *p;
-	unsigned i;
+	struct list_head *tail_page, *to_remove, *next_page;
+	struct buffer_page *to_remove_page, *tmp_iter_page;
+	struct buffer_page *last_page, *first_page;
+	unsigned int nr_removed;
+	unsigned long head_bit;
+	int page_entries;
+
+	head_bit = 0;
 
 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
-	rb_head_page_deactivate(cpu_buffer);
+	atomic_inc(&cpu_buffer->record_disabled);
+	/*
+	 * We don't race with the readers since we have acquired the reader
+	 * lock. We also don't race with writers after disabling recording.
+	 * This makes it easy to figure out the first and the last page to be
+	 * removed from the list. We unlink all the pages in between including
+	 * the first and last pages. This is done in a busy loop so that we
+	 * lose the least number of traces.
+	 * The pages are freed after we restart recording and unlock readers.
+	 */
+	tail_page = &cpu_buffer->tail_page->list;
 
-	for (i = 0; i < nr_pages; i++) {
-		if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-			goto out;
-		p = cpu_buffer->pages->next;
-		bpage = list_entry(p, struct buffer_page, list);
-		list_del_init(&bpage->list);
-		free_buffer_page(bpage);
+	/*
+	 * tail page might be on reader page, we remove the next page
+	 * from the ring buffer
+	 */
+	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
+		tail_page = rb_list_head(tail_page->next);
+	to_remove = tail_page;
+
+	/* start of pages to remove */
+	first_page = list_entry(rb_list_head(to_remove->next),
+				struct buffer_page, list);
+
+	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
+		to_remove = rb_list_head(to_remove)->next;
+		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
 	}
-	if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-		goto out;
 
-	rb_reset_cpu(cpu_buffer);
-	rb_check_pages(cpu_buffer);
+	next_page = rb_list_head(to_remove)->next;
 
-out:
+	/*
+	 * Now we remove all pages between tail_page and next_page.
+	 * Make sure that we have head_bit value preserved for the
+	 * next page
+	 */
+	tail_page->next = (struct list_head *)((unsigned long)next_page |
+						head_bit);
+	next_page = rb_list_head(next_page);
+	next_page->prev = tail_page;
+
+	/* make sure pages points to a valid page in the ring buffer */
+	cpu_buffer->pages = next_page;
+
+	/* update head page */
+	if (head_bit)
+		cpu_buffer->head_page = list_entry(next_page,
+						struct buffer_page, list);
+
+	/*
+	 * change read pointer to make sure any read iterators reset
+	 * themselves
+	 */
+	cpu_buffer->read = 0;
+
+	/* pages are removed, resume tracing and then free the pages */
+	atomic_dec(&cpu_buffer->record_disabled);
 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
+
+	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
+
+	/* last buffer page to remove */
+	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
+				list);
+	tmp_iter_page = first_page;
+
+	do {
+		to_remove_page = tmp_iter_page;
+		rb_inc_page(cpu_buffer, &tmp_iter_page);
+
+		/* update the counters */
+		page_entries = rb_page_entries(to_remove_page);
+		if (page_entries) {
+			/*
+			 * If something was added to this page, it was full
+			 * since it is not the tail page. So we deduct the
+			 * bytes consumed in ring buffer from here.
+			 * No need to update overruns, since this page is
+			 * deleted from ring buffer and its entries are
+			 * already accounted for.
+			 */
+			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
+		}
+
+		/*
+		 * We have already removed references to this list item, just
+		 * free up the buffer_page and its page
+		 */
+		free_buffer_page(to_remove_page);
+		nr_removed--;
+
+	} while (to_remove_page != last_page);
+
+	RB_WARN_ON(cpu_buffer, nr_removed);
 }
 
 static void
@@ -1272,6 +1370,8 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	unsigned i;
 
 	raw_spin_lock_irq(&cpu_buffer->reader_lock);
+	/* stop the writers while inserting pages */
+	atomic_inc(&cpu_buffer->record_disabled);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1286,19 +1386,27 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 	rb_check_pages(cpu_buffer);
 
 out:
+	atomic_dec(&cpu_buffer->record_disabled);
 	raw_spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
-static void update_pages_handler(struct ring_buffer_per_cpu *cpu_buffer)
+static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	if (cpu_buffer->nr_pages_to_update > 0)
 		rb_insert_pages(cpu_buffer, &cpu_buffer->new_pages,
 				cpu_buffer->nr_pages_to_update);
 	else
 		rb_remove_pages(cpu_buffer, -cpu_buffer->nr_pages_to_update);
+
 	cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
-	/* reset this value */
-	cpu_buffer->nr_pages_to_update = 0;
+}
+
+static void update_pages_handler(struct work_struct *work)
+{
+	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
+			struct ring_buffer_per_cpu, update_pages_work);
+	rb_update_pages(cpu_buffer);
+	complete(&cpu_buffer->update_completion);
 }
 
 /**
@@ -1308,14 +1416,14 @@ static void update_pages_handler(struct ring_buffer_per_cpu *cpu_buffer)
  *
  * Minimum size is 2 * BUF_PAGE_SIZE.
  *
- * Returns -1 on failure.
+ * Returns 0 on success and < 0 on failure.
  */
 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 			int cpu_id)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	unsigned nr_pages;
-	int cpu;
+	int cpu, err = 0;
 
 	/*
 	 * Always succeed at resizing a non-existent buffer:
@@ -1330,15 +1438,18 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 	if (size < BUF_PAGE_SIZE * 2)
 		size = BUF_PAGE_SIZE * 2;
 
-	atomic_inc(&buffer->record_disabled);
+	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
-	/* Make sure all writers are done with this buffer. */
-	synchronize_sched();
+	/*
+	 * Don't succeed if resizing is disabled, as a reader might be
+	 * manipulating the ring buffer and is expecting a sane state while
+	 * this is true.
+	 */
+	if (atomic_read(&buffer->resize_disabled))
+		return -EBUSY;
 
+	/* prevent another thread from changing buffer sizes */
 	mutex_lock(&buffer->mutex);
-	get_online_cpus();
-
-	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
 
 	if (cpu_id == RING_BUFFER_ALL_CPUS) {
 		/* calculate the pages to update */
@@ -1347,33 +1458,67 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 
 			cpu_buffer->nr_pages_to_update = nr_pages -
 						cpu_buffer->nr_pages;
-
 			/*
 			 * nothing more to do for removing pages or no update
 			 */
 			if (cpu_buffer->nr_pages_to_update <= 0)
 				continue;
-
 			/*
 			 * to add pages, make sure all new pages can be
 			 * allocated without receiving ENOMEM
 			 */
 			INIT_LIST_HEAD(&cpu_buffer->new_pages);
 			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
-						&cpu_buffer->new_pages, cpu))
+						&cpu_buffer->new_pages, cpu)) {
 				/* not enough memory for new pages */
-				goto no_mem;
+				err = -ENOMEM;
+				goto out_err;
+			}
+		}
+
+		get_online_cpus();
+		/*
+		 * Fire off all the required work handlers
+		 * Look out for offline CPUs
+		 */
+		for_each_buffer_cpu(buffer, cpu) {
+			cpu_buffer = buffer->buffers[cpu];
+			if (!cpu_buffer->nr_pages_to_update ||
+			    !cpu_online(cpu))
+				continue;
+
+			schedule_work_on(cpu, &cpu_buffer->update_pages_work);
+		}
+		/*
+		 * This loop is for the CPUs that are not online.
+		 * We can't schedule anything on them, but it's not necessary
+		 * since we can change their buffer sizes without any race.
+		 */
+		for_each_buffer_cpu(buffer, cpu) {
+			cpu_buffer = buffer->buffers[cpu];
+			if (!cpu_buffer->nr_pages_to_update ||
+			    cpu_online(cpu))
+				continue;
+
+			rb_update_pages(cpu_buffer);
 		}
 
 		/* wait for all the updates to complete */
 		for_each_buffer_cpu(buffer, cpu) {
 			cpu_buffer = buffer->buffers[cpu];
-			if (cpu_buffer->nr_pages_to_update) {
-				update_pages_handler(cpu_buffer);
-			}
+			if (!cpu_buffer->nr_pages_to_update ||
+			    !cpu_online(cpu))
+				continue;
+
+			wait_for_completion(&cpu_buffer->update_completion);
+			/* reset this value */
+			cpu_buffer->nr_pages_to_update = 0;
 		}
+
+		put_online_cpus();
 	} else {
 		cpu_buffer = buffer->buffers[cpu_id];
+
 		if (nr_pages == cpu_buffer->nr_pages)
 			goto out;
 
@@ -1383,38 +1528,47 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 		INIT_LIST_HEAD(&cpu_buffer->new_pages);
 		if (cpu_buffer->nr_pages_to_update > 0 &&
 			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
-					    &cpu_buffer->new_pages, cpu_id))
-			goto no_mem;
+					    &cpu_buffer->new_pages, cpu_id)) {
+			err = -ENOMEM;
+			goto out_err;
+		}
 
-		update_pages_handler(cpu_buffer);
+		get_online_cpus();
+
+		if (cpu_online(cpu_id)) {
+			schedule_work_on(cpu_id,
+					 &cpu_buffer->update_pages_work);
+			wait_for_completion(&cpu_buffer->update_completion);
+		} else
+			rb_update_pages(cpu_buffer);
+
+		put_online_cpus();
+		/* reset this value */
+		cpu_buffer->nr_pages_to_update = 0;
 	}
 
 out:
-	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
-
-	atomic_dec(&buffer->record_disabled);
-
 	return size;
 
-no_mem:
+out_err:
 	for_each_buffer_cpu(buffer, cpu) {
 		struct buffer_page *bpage, *tmp;
+
 		cpu_buffer = buffer->buffers[cpu];
-		/* reset this number regardless */
 		cpu_buffer->nr_pages_to_update = 0;
+
 		if (list_empty(&cpu_buffer->new_pages))
 			continue;
+
 		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
 					list) {
 			list_del_init(&bpage->list);
 			free_buffer_page(bpage);
 		}
 	}
-	put_online_cpus();
 	mutex_unlock(&buffer->mutex);
-	atomic_dec(&buffer->record_disabled);
-	return -ENOMEM;
+	return err;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
 
@@ -1453,21 +1607,11 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
 	return __rb_page_index(iter->head_page, iter->head);
 }
 
-static inline unsigned long rb_page_write(struct buffer_page *bpage)
-{
-	return local_read(&bpage->write) & RB_WRITE_MASK;
-}
-
 static inline unsigned rb_page_commit(struct buffer_page *bpage)
 {
 	return local_read(&bpage->page->commit);
 }
 
-static inline unsigned long rb_page_entries(struct buffer_page *bpage)
-{
-	return local_read(&bpage->entries) & RB_WRITE_MASK;
-}
-
 /* Size is determined by what has been committed */
 static inline unsigned rb_page_size(struct buffer_page *bpage)
 {
@@ -3492,6 +3636,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
 
 	iter->cpu_buffer = cpu_buffer;
 
+	atomic_inc(&buffer->resize_disabled);
 	atomic_inc(&cpu_buffer->record_disabled);
 
 	return iter;
@@ -3555,6 +3700,7 @@ ring_buffer_read_finish(struct ring_buffer_iter *iter)
 	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
 
 	atomic_dec(&cpu_buffer->record_disabled);
+	atomic_dec(&cpu_buffer->buffer->resize_disabled);
 	kfree(iter);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
@@ -3662,8 +3808,12 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
 		return;
 
+	atomic_inc(&buffer->resize_disabled);
 	atomic_inc(&cpu_buffer->record_disabled);
 
+	/* Make sure all commits have finished */
+	synchronize_sched();
+
 	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
 
 	if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
@@ -3679,6 +3829,7 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
 	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
 
 	atomic_dec(&cpu_buffer->record_disabled);
+	atomic_dec(&buffer->resize_disabled);
 }
 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 
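The hunks above replace the old synchronous resize with a deferred model: each CPU's page updates run in that CPU's own work item (update_pages_handler) and the resizer waits on update_completion. Below is a minimal, hedged sketch of that schedule-and-wait pattern in isolation; the names resize_work_ctx, resize_work_fn, and run_resize_on_cpu are illustrative only and are not part of the patch.

/*
 * Hedged sketch of the pattern ring_buffer_resize() now relies on:
 * queue a handler on a specific CPU, then sleep until it signals
 * completion. All names below are hypothetical; only the
 * workqueue/completion calls mirror the patch.
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct resize_work_ctx {
	struct work_struct	work;
	struct completion	done;
};

static void resize_work_fn(struct work_struct *work)
{
	struct resize_work_ctx *ctx =
		container_of(work, struct resize_work_ctx, work);

	/* ... perform the per-CPU update here, like rb_update_pages() ... */
	complete(&ctx->done);			/* wake the waiting resizer */
}

static void run_resize_on_cpu(int cpu, struct resize_work_ctx *ctx)
{
	INIT_WORK(&ctx->work, resize_work_fn);
	init_completion(&ctx->done);
	schedule_work_on(cpu, &ctx->work);	/* runs on that CPU's worker */
	wait_for_completion(&ctx->done);	/* caller blocks until done */
}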
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d1b3469b62e3..dfbd86cc4876 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3076,20 +3076,10 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
 
 static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 {
-	int cpu, ret = size;
+	int ret = size;
 
 	mutex_lock(&trace_types_lock);
 
-	tracing_stop();
-
-	/* disable all cpu buffers */
-	for_each_tracing_cpu(cpu) {
-		if (global_trace.data[cpu])
-			atomic_inc(&global_trace.data[cpu]->disabled);
-		if (max_tr.data[cpu])
-			atomic_inc(&max_tr.data[cpu]->disabled);
-	}
-
 	if (cpu_id != RING_BUFFER_ALL_CPUS) {
 		/* make sure, this cpu is enabled in the mask */
 		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
@@ -3103,14 +3093,6 @@ static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
 		ret = -ENOMEM;
 
 out:
-	for_each_tracing_cpu(cpu) {
-		if (global_trace.data[cpu])
-			atomic_dec(&global_trace.data[cpu]->disabled);
-		if (max_tr.data[cpu])
-			atomic_dec(&max_tr.data[cpu]->disabled);
-	}
-
-	tracing_start();
 	mutex_unlock(&trace_types_lock);
 
 	return ret;
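With the ring buffer serializing resizes internally (buffer->mutex plus the resize_disabled counter), tracing_resize_ring_buffer() no longer has to stop tracing or bump each CPU's disabled count around the resize. A hedged caller-side sketch of the new return convention follows; try_resize() is a hypothetical helper, not code from the patch.

/*
 * Hypothetical caller: per the patch, ring_buffer_resize() can now fail
 * with -EBUSY (a reader holds resize_disabled) or -ENOMEM (new pages
 * could not be allocated), so the error is simply propagated.
 */
static int try_resize(struct ring_buffer *buffer, unsigned long size)
{
	int ret;

	ret = ring_buffer_resize(buffer, size, RING_BUFFER_ALL_CPUS);
	if (ret < 0)
		return ret;

	return 0;
}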