author    | Steven Rostedt <srostedt@redhat.com> | 2009-02-14 01:15:39 -0500
committer | Steven Rostedt <srostedt@redhat.com> | 2009-02-16 17:33:14 -0500
commit    | 52baf11922db7377b580dd5448a07f71c6a35611 (patch)
tree      | 8fe8a5346da71f63d627ff3824e065efff88889b /kernel/trace/ftrace.c
parent    | f6180773d90595650e11de0118bb112018290915 (diff)
ftrace: convert ftrace_lock from a spinlock to mutex
Impact: clean up
The older versions of ftrace required doing the ftrace list
search under atomic context. Now all the calls are in non-atomic
context. There is no reason to keep the ftrace_lock as a spinlock.
This patch converts it to a mutex.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
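
The change itself is mechanical: each `spin_lock(&ftrace_lock)` / `spin_unlock(&ftrace_lock)` pair becomes `mutex_lock(&ftrace_lock)` / `mutex_unlock(&ftrace_lock)`, and the "should not be called from interrupt context" comments can go away because a mutex enforces that constraint by construction (acquiring it may sleep). The sketch below is illustrative only and is not code from this patch; the list, lock, and function names are invented to contrast the two locking patterns around the kind of list walk ftrace_lock protects.

```c
/*
 * Illustrative sketch only -- not taken from the patch. The list,
 * lock, and function names are invented for the example.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

static LIST_HEAD(example_list);

/*
 * Old pattern: a spinlock may be taken from atomic context, but the
 * critical section must not sleep -- hence the repeated "should not
 * be called from interrupt context" comments guarding each walk.
 */
static DEFINE_SPINLOCK(example_spinlock);

static void walk_with_spinlock(void (*cb)(struct list_head *))
{
	struct list_head *p;

	spin_lock(&example_spinlock);
	list_for_each(p, &example_list)
		cb(p);			/* cb() must not sleep here */
	spin_unlock(&example_spinlock);
}

/*
 * New pattern: mutex_lock() may sleep, so every caller must already be
 * in process (non-atomic) context -- the condition the changelog says
 * now holds for all ftrace_lock users -- and in exchange the critical
 * section itself is allowed to sleep.
 */
static DEFINE_MUTEX(example_mutex);

static void walk_with_mutex(void (*cb)(struct list_head *))
{
	struct list_head *p;

	mutex_lock(&example_mutex);
	list_for_each(p, &example_list)
		cb(p);			/* cb() may sleep if it needs to */
	mutex_unlock(&example_mutex);
}
```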
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- | kernel/trace/ftrace.c | 51
1 file changed, 19 insertions(+), 32 deletions(-)
```diff
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 45a44c402566..4771732037ee 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -61,7 +61,7 @@ int function_trace_stop;
  */
 static int ftrace_disabled __read_mostly;
 
-static DEFINE_SPINLOCK(ftrace_lock);
+static DEFINE_MUTEX(ftrace_lock);
 static DEFINE_MUTEX(ftrace_sysctl_lock);
 static DEFINE_MUTEX(ftrace_start_lock);
 
@@ -134,8 +134,7 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 
 	ops->next = ftrace_list;
 	/*
@@ -172,7 +171,7 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 #endif
 	}
 
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 
 	return 0;
 }
@@ -182,8 +181,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	struct ftrace_ops **p;
 	int ret = 0;
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 
 	/*
 	 * If we are removing the last function, then simply point
@@ -224,7 +222,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	}
 
  out:
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 
 	return ret;
 }
@@ -233,8 +231,7 @@ static void ftrace_update_pid_func(void)
 {
 	ftrace_func_t func;
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 
 	if (ftrace_trace_function == ftrace_stub)
 		goto out;
@@ -256,7 +253,7 @@ static void ftrace_update_pid_func(void)
 #endif
 
  out:
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -358,15 +355,12 @@ void ftrace_release(void *start, unsigned long size)
 	if (ftrace_disabled || !start)
 		return;
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
-
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 		if ((rec->ip >= s) && (rec->ip < e))
 			ftrace_free_rec(rec);
 	} while_for_each_ftrace_rec();
-
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }
 
 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
@@ -803,8 +797,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	if (iter->flags & FTRACE_ITER_PRINTALL)
 		return NULL;
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
  retry:
 	if (iter->idx >= iter->pg->index) {
 		if (iter->pg->next) {
@@ -833,7 +826,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 			goto retry;
 		}
 	}
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 
 	return rec;
 }
@@ -962,8 +955,7 @@ static void ftrace_filter_reset(int enable)
 	struct dyn_ftrace *rec;
 	unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 	if (enable)
 		ftrace_filtered = 0;
 	do_for_each_ftrace_rec(pg, rec) {
@@ -971,8 +963,7 @@ static void ftrace_filter_reset(int enable)
 			continue;
 		rec->flags &= ~type;
 	} while_for_each_ftrace_rec();
-
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }
 
 static int
@@ -1151,8 +1142,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 
 	search_len = strlen(search);
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 
 		if (rec->flags & FTRACE_FL_FAILED)
@@ -1171,7 +1161,7 @@ static void ftrace_match_records(char *buff, int len, int enable)
 		if (enable && (rec->flags & FTRACE_FL_FILTER))
 			ftrace_filtered = 1;
 	} while_for_each_ftrace_rec();
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }
 
 static int
@@ -1218,8 +1208,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 		search_len = strlen(search);
 	}
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 
 		if (rec->flags & FTRACE_FL_FAILED)
@@ -1236,7 +1225,7 @@ static void ftrace_match_module_records(char *buff, char *mod, int enable)
 			ftrace_filtered = 1;
 
 	} while_for_each_ftrace_rec();
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 }
 
 /*
@@ -1676,9 +1665,7 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer)
 	if (ftrace_disabled)
 		return -ENODEV;
 
-	/* should not be called from interrupt context */
-	spin_lock(&ftrace_lock);
-
+	mutex_lock(&ftrace_lock);
 	do_for_each_ftrace_rec(pg, rec) {
 
 		if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
@@ -1699,7 +1686,7 @@ ftrace_set_func(unsigned long *array, int idx, char *buffer)
 		}
 	} while_for_each_ftrace_rec();
  out:
-	spin_unlock(&ftrace_lock);
+	mutex_unlock(&ftrace_lock);
 
 	return found ? 0 : -EINVAL;
 }
```