path: root/kernel/trace
author		Steven Rostedt <rostedt@goodmis.org>	2008-10-16 09:31:27 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-10-20 12:27:01 -0400
commit		bd95b88d9e51fcbf392a7e90338a8fcc3499cbd6 (patch)
tree		a88df2bd756a4dd0715a92c3cec193366d86b861 /kernel/trace
parent		c513867561eeb07d24a0bdda1a18a8f91921a301 (diff)
ftrace: release functions from hash
The x86 architecture uses a static recording of mcount caller locations and is not affected by this patch.

For architectures still using the dynamic ftrace daemon, this patch is critical. It removes the race between the recording of a function that calls mcount, the unloading of a module, and the ftrace daemon updating the call sites.

This patch adds the releasing of the hash of functions that the daemon uses to update the mcount call sites. When a module is unloaded, not only is the replaced call-site table updated, but so is the hash of recorded functions that the ftrace daemon will use.

Again, architectures that implement MCOUNT_RECORD (currently only x86) are not affected by this.

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	43
1 file changed, 43 insertions(+), 0 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4dda4f60a2a9..1f54a94189fe 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -164,10 +164,14 @@ static DEFINE_SPINLOCK(ftrace_hash_lock);
 #define ftrace_hash_lock(flags)   spin_lock_irqsave(&ftrace_hash_lock, flags)
 #define ftrace_hash_unlock(flags) \
 			spin_unlock_irqrestore(&ftrace_hash_lock, flags)
+static void ftrace_release_hash(unsigned long start, unsigned long end);
 #else
 /* This is protected via the ftrace_lock with MCOUNT_RECORD. */
 #define ftrace_hash_lock(flags)   do { (void)(flags); } while (0)
 #define ftrace_hash_unlock(flags) do { } while(0)
+static inline void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+}
 #endif
 
 /*
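The hunk above follows the usual kernel compile-out idiom: when the daemon-managed hash is in use, ftrace_release_hash() is declared here and defined later; otherwise an empty static inline stub stands in so that callers never need their own #ifdefs. Below is a minimal standalone sketch of that idiom, using hypothetical names (CONFIG_MY_FEATURE, my_feature_release) rather than the kernel's.

#include <stdio.h>

#ifdef CONFIG_MY_FEATURE
/* Real version would be defined elsewhere when the feature is built in. */
void my_feature_release(unsigned long start, unsigned long end);
#else
/* Feature compiled out: the stub lets calls compile away to nothing. */
static inline void my_feature_release(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
}
#endif

int main(void)
{
	/* The caller looks the same whether or not the feature is built. */
	my_feature_release(0x1000, 0x2000);
	printf("no #ifdef needed at the call site\n");
	return 0;
}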
@@ -347,6 +351,7 @@ void ftrace_release(void *start, unsigned long size)
 	}
 	spin_unlock(&ftrace_lock);
 
+	ftrace_release_hash(s, e);
 }
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
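ftrace_release() already walks the record table for the address range being unloaded; the added line simply hands that same [s, e) range to the hash purge as well. The following standalone sketch shows that flow with simplified stand-in names (release(), release_hash_range()) rather than the kernel's, assuming the usual pointer-plus-size to range conversion used by ftrace_release().

#include <stdio.h>

static void release_hash_range(unsigned long s, unsigned long e)
{
	/* Stand-in for the range-based hash purge added by this patch. */
	printf("dropping hash entries in [0x%lx, 0x%lx)\n", s, e);
}

static void release(void *start, unsigned long size)
{
	unsigned long s = (unsigned long)start;
	unsigned long e = s + size;

	/* ... walk and free the record table for [s, e) ... */

	release_hash_range(s, e);	/* the one-line addition shown above */
}

int main(void)
{
	static char module_text[4096];	/* pretend this is a module's text */

	release(module_text, sizeof(module_text));
	return 0;
}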
@@ -1659,6 +1664,44 @@ void __init ftrace_init(void)
 	ftrace_disabled = 1;
 }
 #else /* CONFIG_FTRACE_MCOUNT_RECORD */
+
+static void ftrace_release_hash(unsigned long start, unsigned long end)
+{
+	struct dyn_ftrace *rec;
+	struct hlist_node *t, *n;
+	struct hlist_head *head, temp_list;
+	unsigned long flags;
+	int i, cpu;
+
+	preempt_disable_notrace();
+
+	/* disable incase we call something that calls mcount */
+	cpu = raw_smp_processor_id();
+	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
+
+	ftrace_hash_lock(flags);
+
+	for (i = 0; i < FTRACE_HASHSIZE; i++) {
+		INIT_HLIST_HEAD(&temp_list);
+		head = &ftrace_hash[i];
+
+		/* all CPUS are stopped, we are safe to modify code */
+		hlist_for_each_entry_safe(rec, t, n, head, node) {
+			if (rec->flags & FTRACE_FL_FREE)
+				continue;
+
+			if ((rec->ip >= start) && (rec->ip < end))
+				ftrace_free_rec(rec);
+		}
+	}
+
+	ftrace_hash_unlock(flags);
+
+	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+	preempt_enable_notrace();
+
+}
+
 static int ftraced(void *ignore)
 {
 	unsigned long usecs;
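For readers without the surrounding kernel source, the shape of ftrace_release_hash() above is: bump a per-CPU "daemon disabled" counter with preemption off, take the hash lock, walk every bucket, and free each record whose ip falls in the unloaded range, using a deletion-safe iterator. Below is a compilable userspace analogue of just the bucket walk and range check, with a plain singly linked list standing in for the kernel's hlist and simplified names (struct rec, release_range) that are not part of the kernel.

#include <stdio.h>
#include <stdlib.h>

#define NR_BUCKETS 8

/* Simplified stand-in for struct dyn_ftrace: only the fields the walk needs. */
struct rec {
	unsigned long ip;
	struct rec *next;
};

static struct rec *hash[NR_BUCKETS];

static void add_rec(unsigned long ip)
{
	struct rec *r = malloc(sizeof(*r));
	unsigned int b = ip % NR_BUCKETS;

	r->ip = ip;
	r->next = hash[b];
	hash[b] = r;
}

/*
 * Free every record whose ip lies in [start, end), mirroring how the
 * patch purges the daemon's hash for a module's text range.  The next
 * pointer is read before the node is freed, which is the same idea as
 * hlist_for_each_entry_safe() in the kernel version.
 */
static void release_range(unsigned long start, unsigned long end)
{
	for (int i = 0; i < NR_BUCKETS; i++) {
		struct rec **pprev = &hash[i];
		struct rec *r = *pprev;

		while (r) {
			struct rec *next = r->next;

			if (r->ip >= start && r->ip < end) {
				*pprev = next;	/* unlink; pprev stays put */
				free(r);
			} else {
				pprev = &r->next;
			}
			r = next;
		}
	}
}

int main(void)
{
	add_rec(0x1000);
	add_rec(0x1ff0);
	add_rec(0x3000);

	release_range(0x1000, 0x2000);	/* drop the "unloaded module" range */

	for (int i = 0; i < NR_BUCKETS; i++)
		for (struct rec *r = hash[i]; r; r = r->next)
			printf("kept ip=0x%lx\n", r->ip);
	return 0;
}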