author		Frederic Weisbecker <fweisbec@gmail.com>	2009-04-19 17:39:33 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-20 04:59:20 -0400
commit		f3b9aae16219aaeca2dd5a9ca69f7a10faa063df (patch)
tree		67086cbe4b26fc29035f7b800f56d16a653373f7 /kernel/trace/ring_buffer.c
parent		e057a5e5647a1c9d0d0054fbd298bfa04b3d1cb4 (diff)
tracing/ring-buffer: Add unlock recursion protection on discard
The pair of helpers trace_recursive_lock() and trace_recursive_unlock()
has recently been introduced to provide generic tracing recursion
protection.
They are used symmetrically (sketched in the model below):
- trace_recursive_lock() on buffer reserve
- trace_recursive_unlock() on buffer commit
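
For illustration, here is a minimal standalone model of that pairing and
of what goes wrong when the discard path forgets the unlock (plain
userspace C, not the kernel implementation; the helper names mirror the
kernel ones and the depth limit of 3 is only an assumption of this sketch):

/*
 * Minimal standalone model of the recursion protection described above
 * (userspace C, not the kernel code; names mirror the kernel helpers,
 * the depth limit of 3 is only an assumption of this sketch).
 */
#include <stdio.h>

static int trace_recursion_depth;	/* models the recursion counter */

static int trace_recursive_lock(void)	/* taken on buffer reserve */
{
	if (trace_recursion_depth >= 3) {
		/* the kernel would WARN_ON_ONCE() here and drop the event */
		fprintf(stderr, "WARNING: tracing recursion too deep\n");
		return -1;
	}
	trace_recursion_depth++;
	return 0;
}

static void trace_recursive_unlock(void)	/* released on commit -- and on discard */
{
	trace_recursion_depth--;
}

static void trace_event(int passes_filter)
{
	if (trace_recursive_lock())
		return;			/* reserve failed, nothing to release */

	if (passes_filter) {
		/* ... write the event, then commit ... */
		trace_recursive_unlock();
		return;
	}
	/*
	 * Discard path: without this unlock the counter leaks, and after a
	 * few discarded events every later reserve hits the warning above.
	 */
	trace_recursive_unlock();
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		trace_event(i & 1);	/* alternate discard and commit */
	printf("final recursion depth: %d\n", trace_recursion_depth);
	return 0;
}

Dropping the trace_recursive_unlock() call in the discard branch makes
the depth counter leak by one per discarded event, which is exactly the
situation the patch below fixes.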
However, sometimes we don't commit but instead discard the event right
after reserving it, e.g. when a filter check rejects it.
In that case we must also release the recursion protection at discard
time, otherwise tracing ends up permanently deactivated and a spurious
warning is raised, such as:
[ 111.119821] ------------[ cut here ]------------
[ 111.119829] WARNING: at kernel/trace/ring_buffer.c:1498 ring_buffer_lock_reserve+0x1b7/0x1d0()
[ 111.119835] Hardware name: AMILO Li 2727
[ 111.119839] Modules linked in:
[ 111.119846] Pid: 5731, comm: Xorg Tainted: G W 2.6.30-rc1 #69
[ 111.119851] Call Trace:
[ 111.119863] [<ffffffff8025ce68>] warn_slowpath+0xd8/0x130
[ 111.119873] [<ffffffff8028a30f>] ? __lock_acquire+0x19f/0x1ae0
[ 111.119882] [<ffffffff8028a30f>] ? __lock_acquire+0x19f/0x1ae0
[ 111.119891] [<ffffffff802199b0>] ? native_sched_clock+0x20/0x70
[ 111.119899] [<ffffffff80286dee>] ? put_lock_stats+0xe/0x30
[ 111.119906] [<ffffffff80286eb8>] ? lock_release_holdtime+0xa8/0x150
[ 111.119913] [<ffffffff802c8ae7>] ring_buffer_lock_reserve+0x1b7/0x1d0
[ 111.119921] [<ffffffff802cd110>] trace_buffer_lock_reserve+0x30/0x70
[ 111.119930] [<ffffffff802ce000>] trace_current_buffer_lock_reserve+0x20/0x30
[ 111.119939] [<ffffffff802474e8>] ftrace_raw_event_sched_switch+0x58/0x100
[ 111.119948] [<ffffffff808103b7>] __schedule+0x3a7/0x4cd
[ 111.119957] [<ffffffff80211b56>] ? ftrace_call+0x5/0x2b
[ 111.119964] [<ffffffff80211b56>] ? ftrace_call+0x5/0x2b
[ 111.119971] [<ffffffff80810c08>] schedule+0x18/0x40
[ 111.119977] [<ffffffff80810e09>] preempt_schedule+0x39/0x60
[ 111.119985] [<ffffffff80813bd3>] _read_unlock+0x53/0x60
[ 111.119993] [<ffffffff807259d2>] sock_def_readable+0x72/0x80
[ 111.120002] [<ffffffff807ad5ed>] unix_stream_sendmsg+0x24d/0x3d0
[ 111.120011] [<ffffffff807219a3>] sock_aio_write+0x143/0x160
[ 111.120019] [<ffffffff80211b56>] ? ftrace_call+0x5/0x2b
[ 111.120026] [<ffffffff80721860>] ? sock_aio_write+0x0/0x160
[ 111.120033] [<ffffffff80721860>] ? sock_aio_write+0x0/0x160
[ 111.120042] [<ffffffff8031c283>] do_sync_readv_writev+0xf3/0x140
[ 111.120049] [<ffffffff80211b56>] ? ftrace_call+0x5/0x2b
[ 111.120057] [<ffffffff80276ff0>] ? autoremove_wake_function+0x0/0x40
[ 111.120067] [<ffffffff8045d489>] ? cap_file_permission+0x9/0x10
[ 111.120074] [<ffffffff8045c1e6>] ? security_file_permission+0x16/0x20
[ 111.120082] [<ffffffff8031cab4>] do_readv_writev+0xd4/0x1f0
[ 111.120089] [<ffffffff80211b56>] ? ftrace_call+0x5/0x2b
[ 111.120097] [<ffffffff80211b56>] ? ftrace_call+0x5/0x2b
[ 111.120105] [<ffffffff8031cc18>] vfs_writev+0x48/0x70
[ 111.120111] [<ffffffff8031cd65>] sys_writev+0x55/0xc0
[ 111.120119] [<ffffffff80211e32>] system_call_fastpath+0x16/0x1b
[ 111.120125] ---[ end trace 15605f4e98d5ccb5 ]---
[ Impact: fix spurious warning triggering tracing shutdown ]
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--	kernel/trace/ring_buffer.c	18
1 file changed, 13 insertions, 5 deletions
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bffde630c4e2..e145969a8eda 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1642,6 +1642,14 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
 
+static inline void rb_event_discard(struct ring_buffer_event *event)
+{
+	event->type = RINGBUF_TYPE_PADDING;
+	/* time delta must be non zero */
+	if (!event->time_delta)
+		event->time_delta = 1;
+}
+
 /**
  * ring_buffer_event_discard - discard any event in the ring buffer
  * @event: the event to discard
@@ -1656,10 +1664,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
  */
 void ring_buffer_event_discard(struct ring_buffer_event *event)
 {
-	event->type = RINGBUF_TYPE_PADDING;
-	/* time delta must be non zero */
-	if (!event->time_delta)
-		event->time_delta = 1;
+	rb_event_discard(event);
+	trace_recursive_unlock();
 }
 EXPORT_SYMBOL_GPL(ring_buffer_event_discard);
 
@@ -1690,7 +1696,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	int cpu;
 
 	/* The event is discarded regardless */
-	ring_buffer_event_discard(event);
+	rb_event_discard(event);
 
 	/*
 	 * This must only be called if the event has not been
@@ -1735,6 +1741,8 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	if (rb_is_commit(cpu_buffer, event))
 		rb_set_commit_to_write(cpu_buffer);
 
+	trace_recursive_unlock();
+
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */
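
Side note on the rb_event_discard() helper factored out above: a
discarded event is not removed from the buffer, it is turned into a
padding entry that readers skip, and its time delta is forced to be
nonzero so it stays distinguishable from the zero-delta padding that
marks the end of a page's committed data. The following standalone
sketch (plain userspace C, not the kernel code; the enum values and the
zero-delta end-of-data convention are simplified assumptions of the
model) shows that reading pattern:

/*
 * Standalone model (userspace C, not the kernel code) of discard-as-padding:
 * the event's slot stays in place, readers skip it but still accumulate its
 * time delta.  The zero-delta "end of data" convention is a simplified
 * assumption of this sketch.
 */
#include <stdio.h>

enum { TYPE_DATA, TYPE_PADDING };

struct event {
	int type;
	unsigned int time_delta;
	int payload;
};

static void event_discard(struct event *e)
{
	e->type = TYPE_PADDING;
	if (!e->time_delta)		/* keep it distinguishable from end-of-data padding */
		e->time_delta = 1;
}

int main(void)
{
	struct event page[4] = {
		{ TYPE_DATA,    2, 100 },
		{ TYPE_DATA,    0, 200 },	/* will be discarded */
		{ TYPE_DATA,    3, 300 },
		{ TYPE_PADDING, 0,   0 },	/* zero delta: nothing committed beyond here */
	};
	unsigned long long ts = 0;

	event_discard(&page[1]);

	for (int i = 0; i < 4; i++) {
		if (page[i].type == TYPE_PADDING && !page[i].time_delta)
			break;			/* end of committed data */
		ts += page[i].time_delta;	/* discarded events still advance the clock */
		if (page[i].type == TYPE_PADDING)
			continue;		/* skip the discarded event */
		printf("ts=%llu payload=%d\n", ts, page[i].payload);
	}
	return 0;
}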