diff options
author | Steven Rostedt <rostedt@goodmis.org> | 2008-08-15 21:40:05 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-14 04:35:47 -0400 |
commit | 99ecdc43bc17faf5fa571db8569df171ecd0e5b8 (patch) | |
tree | aedefb2859247aefe7538e479ab71bcd3a1717c8 /kernel/trace/ftrace.c | |
parent | 00fd61aee10533e003f2f00ab7163207660a4051 (diff) |
ftrace: add necessary locking for ftrace records
The new design of pre-recorded mcounts and updating the code outside of
kstop_machine has changed the way the records themselves are protected.
This patch uses the ftrace_lock to protect the records. Note, the lock
still does not need to be taken within calls that are only called via
kstop_machine, since that code cannot run while the spin lock is held.
Also removed the hash_lock needed for the daemon when MCOUNT_RECORD is
configured. Also did a slight cleanup of an unused variable.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- | kernel/trace/ftrace.c | 44 |
1 file changed, 30 insertions, 14 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 11d94f2dc485..43665add9805 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -81,7 +81,7 @@ void clear_ftrace_function(void) | |||
81 | 81 | ||
82 | static int __register_ftrace_function(struct ftrace_ops *ops) | 82 | static int __register_ftrace_function(struct ftrace_ops *ops) |
83 | { | 83 | { |
84 | /* Should never be called by interrupts */ | 84 | /* should not be called from interrupt context */ |
85 | spin_lock(&ftrace_lock); | 85 | spin_lock(&ftrace_lock); |
86 | 86 | ||
87 | ops->next = ftrace_list; | 87 | ops->next = ftrace_list; |
@@ -115,6 +115,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
115 | struct ftrace_ops **p; | 115 | struct ftrace_ops **p; |
116 | int ret = 0; | 116 | int ret = 0; |
117 | 117 | ||
118 | /* should not be called from interrupt context */ | ||
118 | spin_lock(&ftrace_lock); | 119 | spin_lock(&ftrace_lock); |
119 | 120 | ||
120 | /* | 121 | /* |
@@ -153,6 +154,21 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
153 | 154 | ||
154 | #ifdef CONFIG_DYNAMIC_FTRACE | 155 | #ifdef CONFIG_DYNAMIC_FTRACE |
155 | 156 | ||
157 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | ||
158 | /* | ||
159 | * The hash lock is only needed when the recording of the mcount | ||
160 | * callers are dynamic. That is, by the caller themselves and | ||
161 | * not recorded via the compilation. | ||
162 | */ | ||
163 | static DEFINE_SPINLOCK(ftrace_hash_lock); | ||
164 | #define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags) | ||
165 | #define ftrace_hash_unlock(flags) spin_unlock_irqrestore(&ftrace_hash_lock, flags) | ||
166 | #else | ||
167 | /* This is protected via the ftrace_lock with MCOUNT_RECORD. */ | ||
168 | #define ftrace_hash_lock(flags) do { (void)flags; } while (0) | ||
169 | #define ftrace_hash_unlock(flags) do { } while(0) | ||
170 | #endif | ||
171 | |||
156 | static struct task_struct *ftraced_task; | 172 | static struct task_struct *ftraced_task; |
157 | 173 | ||
158 | enum { | 174 | enum { |
@@ -171,7 +187,6 @@ static struct hlist_head ftrace_hash[FTRACE_HASHSIZE]; | |||
171 | 187 | ||
172 | static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); | 188 | static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); |
173 | 189 | ||
174 | static DEFINE_SPINLOCK(ftrace_shutdown_lock); | ||
175 | static DEFINE_MUTEX(ftraced_lock); | 190 | static DEFINE_MUTEX(ftraced_lock); |
176 | static DEFINE_MUTEX(ftrace_regex_lock); | 191 | static DEFINE_MUTEX(ftrace_regex_lock); |
177 | 192 | ||
@@ -310,7 +325,7 @@ void ftrace_release(void *start, unsigned long size) | |||
310 | if (ftrace_disabled || !start) | 325 | if (ftrace_disabled || !start) |
311 | return; | 326 | return; |
312 | 327 | ||
313 | /* No interrupt should call this */ | 328 | /* should not be called from interrupt context */ |
314 | spin_lock(&ftrace_lock); | 329 | spin_lock(&ftrace_lock); |
315 | 330 | ||
316 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 331 | for (pg = ftrace_pages_start; pg; pg = pg->next) { |
@@ -362,7 +377,6 @@ ftrace_record_ip(unsigned long ip) | |||
362 | unsigned long flags; | 377 | unsigned long flags; |
363 | unsigned long key; | 378 | unsigned long key; |
364 | int resched; | 379 | int resched; |
365 | int atomic; | ||
366 | int cpu; | 380 | int cpu; |
367 | 381 | ||
368 | if (!ftrace_enabled || ftrace_disabled) | 382 | if (!ftrace_enabled || ftrace_disabled) |
@@ -392,9 +406,7 @@ ftrace_record_ip(unsigned long ip) | |||
392 | if (ftrace_ip_in_hash(ip, key)) | 406 | if (ftrace_ip_in_hash(ip, key)) |
393 | goto out; | 407 | goto out; |
394 | 408 | ||
395 | atomic = irqs_disabled(); | 409 | ftrace_hash_lock(flags); |
396 | |||
397 | spin_lock_irqsave(&ftrace_shutdown_lock, flags); | ||
398 | 410 | ||
399 | /* This ip may have hit the hash before the lock */ | 411 | /* This ip may have hit the hash before the lock */ |
400 | if (ftrace_ip_in_hash(ip, key)) | 412 | if (ftrace_ip_in_hash(ip, key)) |
@@ -411,7 +423,7 @@ ftrace_record_ip(unsigned long ip) | |||
411 | ftraced_trigger = 1; | 423 | ftraced_trigger = 1; |
412 | 424 | ||
413 | out_unlock: | 425 | out_unlock: |
414 | spin_unlock_irqrestore(&ftrace_shutdown_lock, flags); | 426 | ftrace_hash_unlock(flags); |
415 | out: | 427 | out: |
416 | per_cpu(ftrace_shutdown_disable_cpu, cpu)--; | 428 | per_cpu(ftrace_shutdown_disable_cpu, cpu)--; |
417 | 429 | ||
@@ -887,6 +899,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
887 | 899 | ||
888 | (*pos)++; | 900 | (*pos)++; |
889 | 901 | ||
902 | /* should not be called from interrupt context */ | ||
903 | spin_lock(&ftrace_lock); | ||
890 | retry: | 904 | retry: |
891 | if (iter->idx >= iter->pg->index) { | 905 | if (iter->idx >= iter->pg->index) { |
892 | if (iter->pg->next) { | 906 | if (iter->pg->next) { |
@@ -910,6 +924,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
910 | goto retry; | 924 | goto retry; |
911 | } | 925 | } |
912 | } | 926 | } |
927 | spin_unlock(&ftrace_lock); | ||
913 | 928 | ||
914 | iter->pos = *pos; | 929 | iter->pos = *pos; |
915 | 930 | ||
@@ -1023,8 +1038,8 @@ static void ftrace_filter_reset(int enable) | |||
1023 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1038 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
1024 | unsigned i; | 1039 | unsigned i; |
1025 | 1040 | ||
1026 | /* keep kstop machine from running */ | 1041 | /* should not be called from interrupt context */ |
1027 | preempt_disable(); | 1042 | spin_lock(&ftrace_lock); |
1028 | if (enable) | 1043 | if (enable) |
1029 | ftrace_filtered = 0; | 1044 | ftrace_filtered = 0; |
1030 | pg = ftrace_pages_start; | 1045 | pg = ftrace_pages_start; |
@@ -1037,7 +1052,7 @@ static void ftrace_filter_reset(int enable) | |||
1037 | } | 1052 | } |
1038 | pg = pg->next; | 1053 | pg = pg->next; |
1039 | } | 1054 | } |
1040 | preempt_enable(); | 1055 | spin_unlock(&ftrace_lock); |
1041 | } | 1056 | } |
1042 | 1057 | ||
1043 | static int | 1058 | static int |
@@ -1149,8 +1164,8 @@ ftrace_match(unsigned char *buff, int len, int enable) | |||
1149 | } | 1164 | } |
1150 | } | 1165 | } |
1151 | 1166 | ||
1152 | /* keep kstop machine from running */ | 1167 | /* should not be called from interrupt context */ |
1153 | preempt_disable(); | 1168 | spin_lock(&ftrace_lock); |
1154 | if (enable) | 1169 | if (enable) |
1155 | ftrace_filtered = 1; | 1170 | ftrace_filtered = 1; |
1156 | pg = ftrace_pages_start; | 1171 | pg = ftrace_pages_start; |
@@ -1187,7 +1202,7 @@ ftrace_match(unsigned char *buff, int len, int enable) | |||
1187 | } | 1202 | } |
1188 | pg = pg->next; | 1203 | pg = pg->next; |
1189 | } | 1204 | } |
1190 | preempt_enable(); | 1205 | spin_unlock(&ftrace_lock); |
1191 | } | 1206 | } |
1192 | 1207 | ||
1193 | static ssize_t | 1208 | static ssize_t |
@@ -1551,6 +1566,7 @@ static int ftrace_convert_nops(unsigned long *start, | |||
1551 | p = start; | 1566 | p = start; |
1552 | while (p < end) { | 1567 | while (p < end) { |
1553 | addr = ftrace_call_adjust(*p++); | 1568 | addr = ftrace_call_adjust(*p++); |
1569 | /* should not be called from interrupt context */ | ||
1554 | spin_lock(&ftrace_lock); | 1570 | spin_lock(&ftrace_lock); |
1555 | ftrace_record_ip(addr); | 1571 | ftrace_record_ip(addr); |
1556 | spin_unlock(&ftrace_lock); | 1572 | spin_unlock(&ftrace_lock); |