diff options
author | Markus Metzger <markus.t.metzger@intel.com> | 2009-09-15 07:00:23 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-09-18 14:43:20 -0400 |
commit | 5622f295b53fb60dbf9bed3e2c89d182490a8b7f (patch) | |
tree | 8279554bddd1607d53dc06e97f4b5a1d0c085ccd /arch | |
parent | 4b77a7297795229eca96c41e1709a3c87909fabe (diff) |
x86, perf_counter, bts: Optimize BTS overflow handling
Draining the BTS buffer on a buffer overflow interrupt takes too
long, resulting in a kernel lockup when tracing the kernel.
Restructure perf_counter sampling into sample creation and sample
output.
Prepare a single reference sample for BTS sampling and update the
from and to address fields when draining the BTS buffer. Drain the
entire BTS buffer between a single perf_output_begin() /
perf_output_end() pair.
Signed-off-by: Markus Metzger <markus.t.metzger@intel.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20090915130023.A16204@sedona.ch.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 60 |
1 files changed, 37 insertions, 23 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index f9cd0849bd42..6a0e71b38126 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c | |||
@@ -36,10 +36,10 @@ static u64 perf_counter_mask __read_mostly; | |||
36 | #define BTS_RECORD_SIZE 24 | 36 | #define BTS_RECORD_SIZE 24 |
37 | 37 | ||
38 | /* The size of a per-cpu BTS buffer in bytes: */ | 38 | /* The size of a per-cpu BTS buffer in bytes: */ |
39 | #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 1024) | 39 | #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048) |
40 | 40 | ||
41 | /* The BTS overflow threshold in bytes from the end of the buffer: */ | 41 | /* The BTS overflow threshold in bytes from the end of the buffer: */ |
42 | #define BTS_OVFL_TH (BTS_RECORD_SIZE * 64) | 42 | #define BTS_OVFL_TH (BTS_RECORD_SIZE * 128) |
43 | 43 | ||
44 | 44 | ||
45 | /* | 45 | /* |
@@ -1488,8 +1488,7 @@ void perf_counter_print_debug(void) | |||
1488 | local_irq_restore(flags); | 1488 | local_irq_restore(flags); |
1489 | } | 1489 | } |
1490 | 1490 | ||
1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, | 1491 | static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc) |
1492 | struct perf_sample_data *data) | ||
1493 | { | 1492 | { |
1494 | struct debug_store *ds = cpuc->ds; | 1493 | struct debug_store *ds = cpuc->ds; |
1495 | struct bts_record { | 1494 | struct bts_record { |
@@ -1498,8 +1497,11 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, | |||
1498 | u64 flags; | 1497 | u64 flags; |
1499 | }; | 1498 | }; |
1500 | struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; | 1499 | struct perf_counter *counter = cpuc->counters[X86_PMC_IDX_FIXED_BTS]; |
1501 | unsigned long orig_ip = data->regs->ip; | ||
1502 | struct bts_record *at, *top; | 1500 | struct bts_record *at, *top; |
1501 | struct perf_output_handle handle; | ||
1502 | struct perf_event_header header; | ||
1503 | struct perf_sample_data data; | ||
1504 | struct pt_regs regs; | ||
1503 | 1505 | ||
1504 | if (!counter) | 1506 | if (!counter) |
1505 | return; | 1507 | return; |
@@ -1510,19 +1512,38 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_counters *cpuc, | |||
1510 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; | 1512 | at = (struct bts_record *)(unsigned long)ds->bts_buffer_base; |
1511 | top = (struct bts_record *)(unsigned long)ds->bts_index; | 1513 | top = (struct bts_record *)(unsigned long)ds->bts_index; |
1512 | 1514 | ||
1515 | if (top <= at) | ||
1516 | return; | ||
1517 | |||
1513 | ds->bts_index = ds->bts_buffer_base; | 1518 | ds->bts_index = ds->bts_buffer_base; |
1514 | 1519 | ||
1520 | |||
1521 | data.period = counter->hw.last_period; | ||
1522 | data.addr = 0; | ||
1523 | regs.ip = 0; | ||
1524 | |||
1525 | /* | ||
1526 | * Prepare a generic sample, i.e. fill in the invariant fields. | ||
1527 | * We will overwrite the from and to address before we output | ||
1528 | * the sample. | ||
1529 | */ | ||
1530 | perf_prepare_sample(&header, &data, counter, ®s); | ||
1531 | |||
1532 | if (perf_output_begin(&handle, counter, | ||
1533 | header.size * (top - at), 1, 1)) | ||
1534 | return; | ||
1535 | |||
1515 | for (; at < top; at++) { | 1536 | for (; at < top; at++) { |
1516 | data->regs->ip = at->from; | 1537 | data.ip = at->from; |
1517 | data->addr = at->to; | 1538 | data.addr = at->to; |
1518 | 1539 | ||
1519 | perf_counter_output(counter, 1, data); | 1540 | perf_output_sample(&handle, &header, &data, counter); |
1520 | } | 1541 | } |
1521 | 1542 | ||
1522 | data->regs->ip = orig_ip; | 1543 | perf_output_end(&handle); |
1523 | data->addr = 0; | ||
1524 | 1544 | ||
1525 | /* There's new data available. */ | 1545 | /* There's new data available. */ |
1546 | counter->hw.interrupts++; | ||
1526 | counter->pending_kill = POLL_IN; | 1547 | counter->pending_kill = POLL_IN; |
1527 | } | 1548 | } |
1528 | 1549 | ||
@@ -1552,13 +1573,9 @@ static void x86_pmu_disable(struct perf_counter *counter) | |||
1552 | x86_perf_counter_update(counter, hwc, idx); | 1573 | x86_perf_counter_update(counter, hwc, idx); |
1553 | 1574 | ||
1554 | /* Drain the remaining BTS records. */ | 1575 | /* Drain the remaining BTS records. */ |
1555 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) { | 1576 | if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) |
1556 | struct perf_sample_data data; | 1577 | intel_pmu_drain_bts_buffer(cpuc); |
1557 | struct pt_regs regs; | ||
1558 | 1578 | ||
1559 | data.regs = ®s; | ||
1560 | intel_pmu_drain_bts_buffer(cpuc, &data); | ||
1561 | } | ||
1562 | cpuc->counters[idx] = NULL; | 1579 | cpuc->counters[idx] = NULL; |
1563 | clear_bit(idx, cpuc->used_mask); | 1580 | clear_bit(idx, cpuc->used_mask); |
1564 | 1581 | ||
@@ -1619,7 +1636,6 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) | |||
1619 | int idx, handled = 0; | 1636 | int idx, handled = 0; |
1620 | u64 val; | 1637 | u64 val; |
1621 | 1638 | ||
1622 | data.regs = regs; | ||
1623 | data.addr = 0; | 1639 | data.addr = 0; |
1624 | 1640 | ||
1625 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1641 | cpuc = &__get_cpu_var(cpu_hw_counters); |
@@ -1644,7 +1660,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs) | |||
1644 | if (!x86_perf_counter_set_period(counter, hwc, idx)) | 1660 | if (!x86_perf_counter_set_period(counter, hwc, idx)) |
1645 | continue; | 1661 | continue; |
1646 | 1662 | ||
1647 | if (perf_counter_overflow(counter, 1, &data)) | 1663 | if (perf_counter_overflow(counter, 1, &data, regs)) |
1648 | p6_pmu_disable_counter(hwc, idx); | 1664 | p6_pmu_disable_counter(hwc, idx); |
1649 | } | 1665 | } |
1650 | 1666 | ||
@@ -1665,13 +1681,12 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) | |||
1665 | int bit, loops; | 1681 | int bit, loops; |
1666 | u64 ack, status; | 1682 | u64 ack, status; |
1667 | 1683 | ||
1668 | data.regs = regs; | ||
1669 | data.addr = 0; | 1684 | data.addr = 0; |
1670 | 1685 | ||
1671 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1686 | cpuc = &__get_cpu_var(cpu_hw_counters); |
1672 | 1687 | ||
1673 | perf_disable(); | 1688 | perf_disable(); |
1674 | intel_pmu_drain_bts_buffer(cpuc, &data); | 1689 | intel_pmu_drain_bts_buffer(cpuc); |
1675 | status = intel_pmu_get_status(); | 1690 | status = intel_pmu_get_status(); |
1676 | if (!status) { | 1691 | if (!status) { |
1677 | perf_enable(); | 1692 | perf_enable(); |
@@ -1702,7 +1717,7 @@ again: | |||
1702 | 1717 | ||
1703 | data.period = counter->hw.last_period; | 1718 | data.period = counter->hw.last_period; |
1704 | 1719 | ||
1705 | if (perf_counter_overflow(counter, 1, &data)) | 1720 | if (perf_counter_overflow(counter, 1, &data, regs)) |
1706 | intel_pmu_disable_counter(&counter->hw, bit); | 1721 | intel_pmu_disable_counter(&counter->hw, bit); |
1707 | } | 1722 | } |
1708 | 1723 | ||
@@ -1729,7 +1744,6 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) | |||
1729 | int idx, handled = 0; | 1744 | int idx, handled = 0; |
1730 | u64 val; | 1745 | u64 val; |
1731 | 1746 | ||
1732 | data.regs = regs; | ||
1733 | data.addr = 0; | 1747 | data.addr = 0; |
1734 | 1748 | ||
1735 | cpuc = &__get_cpu_var(cpu_hw_counters); | 1749 | cpuc = &__get_cpu_var(cpu_hw_counters); |
@@ -1754,7 +1768,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs) | |||
1754 | if (!x86_perf_counter_set_period(counter, hwc, idx)) | 1768 | if (!x86_perf_counter_set_period(counter, hwc, idx)) |
1755 | continue; | 1769 | continue; |
1756 | 1770 | ||
1757 | if (perf_counter_overflow(counter, 1, &data)) | 1771 | if (perf_counter_overflow(counter, 1, &data, regs)) |
1758 | amd_pmu_disable_counter(hwc, idx); | 1772 | amd_pmu_disable_counter(hwc, idx); |
1759 | } | 1773 | } |
1760 | 1774 | ||