 arch/x86/kernel/entry_32.S    |   3 +
 kernel/rcupdate.c             |   2 +-
 kernel/trace/ftrace.c         |  17 ++-
 kernel/trace/trace.c          |  37 ++--
 kernel/trace/trace.h          |   1 +
 kernel/trace/trace_events.c   | 207 ++---------
 kernel/trace/trace_syscalls.c |  10 +-
 7 files changed, 68 insertions(+), 209 deletions(-)
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 2cfbc3a3a2dd..f0dcb0ceb6a2 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -1176,6 +1176,9 @@ ftrace_restore_flags:
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(mcount)
+	cmpl $__PAGE_OFFSET, %esp
+	jb ftrace_stub		/* Paging not enabled yet? */
+
 	cmpl $0, function_trace_stop
 	jne ftrace_stub
 
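With dynamic ftrace disabled, every function entry calls mcount as soon as the compiler-generated hooks run, including very early boot code. On x86-32 the boot-time stack still sits below __PAGE_OFFSET, so the two added instructions compare %esp against it and skip tracing until the kernel runs on a properly mapped stack. A minimal C rendering of the guard (illustrative only; "sp" is a stand-in for %esp, not a real kernel variable):

	/* C sketch of the new check at the top of mcount */
	if (sp < __PAGE_OFFSET)
		return;			/* too early in boot to trace */
	if (function_trace_stop)
		return;			/* existing check, unchanged */
	/* ... fall through into the function tracer ... */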
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 33eb4620aa17..b02a339836b4 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -122,7 +122,7 @@ struct lockdep_map rcu_sched_lock_map =
 	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
 EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
 
-int debug_lockdep_rcu_enabled(void)
+int notrace debug_lockdep_rcu_enabled(void)
 {
 	return rcu_scheduler_active && debug_locks &&
 	       current->lockdep_recursion == 0;
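The notrace annotation is the functional change here: debug_lockdep_rcu_enabled() is reachable from the function tracer's own code paths, and a traceable helper there would recurse back into the tracer. In the kernel, notrace expands (roughly; see include/linux/compiler.h) to GCC's no-instrument attribute, which suppresses the mcount call that -pg otherwise inserts at function entry:

	/* paraphrased from include/linux/compiler.h */
	#define notrace __attribute__((no_instrument_function))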
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index a6d098c6df3f..03cf44ac54d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1978,12 +1978,27 @@ int __weak ftrace_arch_code_modify_post_process(void)
 
 void ftrace_modify_all_code(int command)
 {
+	int update = command & FTRACE_UPDATE_TRACE_FUNC;
+
+	/*
+	 * If the ftrace_caller calls a ftrace_ops func directly,
+	 * we need to make sure that it only traces functions it
+	 * expects to trace. When doing the switch of functions,
+	 * we need to update to the ftrace_ops_list_func first
+	 * before the transition between old and new calls are set,
+	 * as the ftrace_ops_list_func will check the ops hashes
+	 * to make sure the ops are having the right functions
+	 * traced.
+	 */
+	if (update)
+		ftrace_update_ftrace_func(ftrace_ops_list_func);
+
 	if (command & FTRACE_UPDATE_CALLS)
 		ftrace_replace_code(1);
 	else if (command & FTRACE_DISABLE_CALLS)
 		ftrace_replace_code(0);
 
-	if (command & FTRACE_UPDATE_TRACE_FUNC)
+	if (update && ftrace_trace_function != ftrace_ops_list_func)
 		ftrace_update_ftrace_func(ftrace_trace_function);
 
 	if (command & FTRACE_START_FUNC_RET)
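The hunk turns the callback switch into a three-step sequence. Condensed, with the same names as above, the ordering reads:

	/* 1. Park everyone on the list func: it checks each ops' hash,
	 *    so it never traces a function an ops did not ask for.
	 */
	if (update)
		ftrace_update_ftrace_func(ftrace_ops_list_func);

	/* 2. Patch the per-site calls while the safe list func runs. */
	ftrace_replace_code(1);

	/* 3. Install the real callback -- unless the list func already
	 *    is the real callback, in which case step 1 finished the job.
	 */
	if (update && ftrace_trace_function != ftrace_ops_list_func)
		ftrace_update_ftrace_func(ftrace_trace_function);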
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 496f94d57698..7974ba20557d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3166,11 +3166,6 @@ static const struct file_operations show_traces_fops = {
 };
 
 /*
- * Only trace on a CPU if the bitmask is set:
- */
-static cpumask_var_t tracing_cpumask;
-
-/*
  * The tracer itself will not take this lock, but still we want
  * to provide a consistent cpumask to user-space:
  */
@@ -3186,11 +3181,12 @@ static ssize_t
 tracing_cpumask_read(struct file *filp, char __user *ubuf,
 		     size_t count, loff_t *ppos)
 {
+	struct trace_array *tr = file_inode(filp)->i_private;
 	int len;
 
 	mutex_lock(&tracing_cpumask_update_lock);
 
-	len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
 	if (count - len < 2) {
 		count = -EINVAL;
 		goto out_err;
@@ -3208,7 +3204,7 @@ static ssize_t
 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
-	struct trace_array *tr = filp->private_data;
+	struct trace_array *tr = file_inode(filp)->i_private;
 	cpumask_var_t tracing_cpumask_new;
 	int err, cpu;
 
@@ -3228,12 +3224,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	 * Increase/decrease the disabled counter if we are
 	 * about to flip a bit in the cpumask:
 	 */
-	if (cpumask_test_cpu(cpu, tracing_cpumask) &&
+	if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 			!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 		atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
 		ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
 	}
-	if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
+	if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
 			cpumask_test_cpu(cpu, tracing_cpumask_new)) {
 		atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
 		ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
@@ -3242,7 +3238,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 	arch_spin_unlock(&ftrace_max_lock);
 	local_irq_enable();
 
-	cpumask_copy(tracing_cpumask, tracing_cpumask_new);
+	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
 
 	mutex_unlock(&tracing_cpumask_update_lock);
 	free_cpumask_var(tracing_cpumask_new);
@@ -3256,9 +3252,10 @@ err_unlock:
 }
 
 static const struct file_operations tracing_cpumask_fops = {
-	.open		= tracing_open_generic,
+	.open		= tracing_open_generic_tr,
 	.read		= tracing_cpumask_read,
 	.write		= tracing_cpumask_write,
+	.release	= tracing_release_generic_tr,
 	.llseek		= generic_file_llseek,
 };
 
@@ -5938,6 +5935,11 @@ static int new_instance_create(const char *name)
 	if (!tr->name)
 		goto out_free_tr;
 
+	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
+		goto out_free_tr;
+
+	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
+
 	raw_spin_lock_init(&tr->start_lock);
 
 	tr->current_trace = &nop_trace;
@@ -5969,6 +5971,7 @@ static int new_instance_create(const char *name)
 out_free_tr:
 	if (tr->trace_buffer.buffer)
 		ring_buffer_free(tr->trace_buffer.buffer);
+	free_cpumask_var(tr->tracing_cpumask);
 	kfree(tr->name);
 	kfree(tr);
 
@@ -6098,6 +6101,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
 {
 	int cpu;
 
+	trace_create_file("tracing_cpumask", 0644, d_tracer,
+			  tr, &tracing_cpumask_fops);
+
 	trace_create_file("trace_options", 0644, d_tracer,
 			  tr, &tracing_iter_fops);
 
@@ -6147,9 +6153,6 @@ static __init int tracer_init_debugfs(void)
 
 	init_tracer_debugfs(&global_trace, d_tracer);
 
-	trace_create_file("tracing_cpumask", 0644, d_tracer,
-			  &global_trace, &tracing_cpumask_fops);
-
 	trace_create_file("available_tracers", 0444, d_tracer,
 			  &global_trace, &show_traces_fops);
 
@@ -6371,7 +6374,7 @@ __init static int tracer_alloc_buffers(void)
 	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
 		goto out;
 
-	if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
+	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
 		goto out_free_buffer_mask;
 
 	/* Only allocate trace_printk buffers if a trace_printk exists */
@@ -6386,7 +6389,7 @@ __init static int tracer_alloc_buffers(void)
 		ring_buf_size = 1;
 
 	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
-	cpumask_copy(tracing_cpumask, cpu_all_mask);
+	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
 
 	raw_spin_lock_init(&global_trace.start_lock);
 
@@ -6441,7 +6444,7 @@ out_free_cpumask:
 #ifdef CONFIG_TRACER_MAX_TRACE
 	free_percpu(global_trace.max_buffer.data);
 #endif
-	free_cpumask_var(tracing_cpumask);
+	free_cpumask_var(global_trace.tracing_cpumask);
 out_free_buffer_mask:
 	free_cpumask_var(tracing_buffer_mask);
 out:
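Every trace.c hunk above follows from moving tracing_cpumask into struct trace_array: each instance now owns its mask, so creation, default value, and teardown all use the standard cpumask_var_t API. The lifecycle, condensed into one hypothetical helper (the API calls are real, the helper name is not):

	static int instance_cpumask_init(struct trace_array *tr)
	{
		/* kmallocs the mask when CONFIG_CPUMASK_OFFSTACK=y;
		 * otherwise allocation is a no-op and this cannot fail */
		if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
			return -ENOMEM;

		/* default: trace on every CPU */
		cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
		return 0;
	}

The matching free_cpumask_var(tr->tracing_cpumask) in the out_free_tr error path keeps a failed instance creation from leaking the mask.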
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fe39acd4c1aa..10c86fb7a2b4 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -206,6 +206,7 @@ struct trace_array {
 	struct dentry		*event_dir;
 	struct list_head	systems;
 	struct list_head	events;
+	cpumask_var_t		tracing_cpumask; /* only trace on set CPUs */
 	int			ref;
 };
 
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 29a7ebcfb426..368a4d50cc30 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1489,12 +1489,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
 }
 
 static int
-event_create_dir(struct dentry *parent,
-		 struct ftrace_event_file *file,
-		 const struct file_operations *id,
-		 const struct file_operations *enable,
-		 const struct file_operations *filter,
-		 const struct file_operations *format)
+event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
 {
 	struct ftrace_event_call *call = file->event_call;
 	struct trace_array *tr = file->tr;
@@ -1522,12 +1517,13 @@ event_create_dir(struct dentry *parent,
 
 	if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
 		trace_create_file("enable", 0644, file->dir, file,
-				  enable);
+				  &ftrace_enable_fops);
 
 #ifdef CONFIG_PERF_EVENTS
 	if (call->event.type && call->class->reg)
 		trace_create_file("id", 0444, file->dir,
-				  (void *)(long)call->event.type, id);
+				  (void *)(long)call->event.type,
+				  &ftrace_event_id_fops);
 #endif
 
 	/*
@@ -1544,10 +1540,10 @@ event_create_dir(struct dentry *parent,
 		}
 	}
 	trace_create_file("filter", 0644, file->dir, call,
-			  filter);
+			  &ftrace_event_filter_fops);
 
 	trace_create_file("format", 0444, file->dir, call,
-			  format);
+			  &ftrace_event_format_fops);
 
 	return 0;
 }
@@ -1648,12 +1644,7 @@ trace_create_new_event(struct ftrace_event_call *call,
 
 /* Add an event to a trace directory */
 static int
-__trace_add_new_event(struct ftrace_event_call *call,
-		      struct trace_array *tr,
-		      const struct file_operations *id,
-		      const struct file_operations *enable,
-		      const struct file_operations *filter,
-		      const struct file_operations *format)
+__trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr)
 {
 	struct ftrace_event_file *file;
 
@@ -1661,7 +1652,7 @@ __trace_add_new_event(struct ftrace_event_call *call,
 	if (!file)
 		return -ENOMEM;
 
-	return event_create_dir(tr->event_dir, file, id, enable, filter, format);
+	return event_create_dir(tr->event_dir, file);
 }
 
 /*
1667/* 1658/*
@@ -1683,8 +1674,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
1683} 1674}
1684 1675
1685struct ftrace_module_file_ops; 1676struct ftrace_module_file_ops;
1686static void __add_event_to_tracers(struct ftrace_event_call *call, 1677static void __add_event_to_tracers(struct ftrace_event_call *call);
1687 struct ftrace_module_file_ops *file_ops);
1688 1678
1689/* Add an additional event_call dynamically */ 1679/* Add an additional event_call dynamically */
1690int trace_add_event_call(struct ftrace_event_call *call) 1680int trace_add_event_call(struct ftrace_event_call *call)
@@ -1695,7 +1685,7 @@ int trace_add_event_call(struct ftrace_event_call *call)
 
 	ret = __register_event(call, NULL);
 	if (ret >= 0)
-		__add_event_to_tracers(call, NULL);
+		__add_event_to_tracers(call);
 
 	mutex_unlock(&event_mutex);
 	mutex_unlock(&trace_types_lock);
@@ -1769,100 +1759,21 @@ int trace_remove_event_call(struct ftrace_event_call *call)
 
 #ifdef CONFIG_MODULES
 
-static LIST_HEAD(ftrace_module_file_list);
-
-/*
- * Modules must own their file_operations to keep up with
- * reference counting.
- */
-struct ftrace_module_file_ops {
-	struct list_head		list;
-	struct module			*mod;
-	struct file_operations		id;
-	struct file_operations		enable;
-	struct file_operations		format;
-	struct file_operations		filter;
-};
-
-static struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-	/*
-	 * As event_calls are added in groups by module,
-	 * when we find one file_ops, we don't need to search for
-	 * each call in that module, as the rest should be the
-	 * same. Only search for a new one if the last one did
-	 * not match.
-	 */
-	if (file_ops && mod == file_ops->mod)
-		return file_ops;
-
-	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-		if (file_ops->mod == mod)
-			return file_ops;
-	}
-	return NULL;
-}
-
-static struct ftrace_module_file_ops *
-trace_create_file_ops(struct module *mod)
-{
-	struct ftrace_module_file_ops *file_ops;
-
-	/*
-	 * This is a bit of a PITA. To allow for correct reference
-	 * counting, modules must "own" their file_operations.
-	 * To do this, we allocate the file operations that will be
-	 * used in the event directory.
-	 */
-
-	file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL);
-	if (!file_ops)
-		return NULL;
-
-	file_ops->mod = mod;
-
-	file_ops->id = ftrace_event_id_fops;
-	file_ops->id.owner = mod;
-
-	file_ops->enable = ftrace_enable_fops;
-	file_ops->enable.owner = mod;
-
-	file_ops->filter = ftrace_event_filter_fops;
-	file_ops->filter.owner = mod;
-
-	file_ops->format = ftrace_event_format_fops;
-	file_ops->format.owner = mod;
-
-	list_add(&file_ops->list, &ftrace_module_file_list);
-
-	return file_ops;
-}
-
 static void trace_module_add_events(struct module *mod)
 {
-	struct ftrace_module_file_ops *file_ops = NULL;
 	struct ftrace_event_call **call, **start, **end;
 
 	start = mod->trace_events;
 	end = mod->trace_events + mod->num_trace_events;
 
-	if (start == end)
-		return;
-
-	file_ops = trace_create_file_ops(mod);
-	if (!file_ops)
-		return;
-
 	for_each_event(call, start, end) {
 		__register_event(*call, mod);
-		__add_event_to_tracers(*call, file_ops);
+		__add_event_to_tracers(*call);
 	}
 }
 
 static void trace_module_remove_events(struct module *mod)
 {
-	struct ftrace_module_file_ops *file_ops;
 	struct ftrace_event_call *call, *p;
 	bool clear_trace = false;
 
@@ -1874,16 +1785,6 @@ static void trace_module_remove_events(struct module *mod)
 			__trace_remove_event_call(call);
 		}
 	}
-
-	/* Now free the file_operations */
-	list_for_each_entry(file_ops, &ftrace_module_file_list, list) {
-		if (file_ops->mod == mod)
-			break;
-	}
-	if (&file_ops->list != &ftrace_module_file_list) {
-		list_del(&file_ops->list);
-		kfree(file_ops);
-	}
 	up_write(&trace_event_sem);
 
 	/*
@@ -1919,67 +1820,21 @@ static int trace_module_notify(struct notifier_block *self,
 	return 0;
 }
 
-static int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-			  struct trace_array *tr,
-			  struct ftrace_module_file_ops *file_ops)
-{
-	return __trace_add_new_event(call, tr,
-				     &file_ops->id, &file_ops->enable,
-				     &file_ops->filter, &file_ops->format);
-}
-
-#else
-static inline struct ftrace_module_file_ops *
-find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod)
-{
-	return NULL;
-}
-static inline int trace_module_notify(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
-static inline int
-__trace_add_new_mod_event(struct ftrace_event_call *call,
-			  struct trace_array *tr,
-			  struct ftrace_module_file_ops *file_ops)
-{
-	return -ENODEV;
-}
+static struct notifier_block trace_module_nb = {
+	.notifier_call = trace_module_notify,
+	.priority = 0,
+};
 #endif /* CONFIG_MODULES */
 
 /* Create a new event directory structure for a trace directory. */
 static void
 __trace_add_event_dirs(struct trace_array *tr)
 {
-	struct ftrace_module_file_ops *file_ops = NULL;
 	struct ftrace_event_call *call;
 	int ret;
 
 	list_for_each_entry(call, &ftrace_events, list) {
-		if (call->mod) {
-			/*
-			 * Directories for events by modules need to
-			 * keep module ref counts when opened (as we don't
-			 * want the module to disappear when reading one
-			 * of these files). The file_ops keep account of
-			 * the module ref count.
-			 */
-			file_ops = find_ftrace_file_ops(file_ops, call->mod);
-			if (!file_ops)
-				continue; /* Warn? */
-			ret = __trace_add_new_mod_event(call, tr, file_ops);
-			if (ret < 0)
-				pr_warning("Could not create directory for event %s\n",
-					   call->name);
-			continue;
-		}
-		ret = __trace_add_new_event(call, tr,
-					    &ftrace_event_id_fops,
-					    &ftrace_enable_fops,
-					    &ftrace_event_filter_fops,
-					    &ftrace_event_format_fops);
+		ret = __trace_add_new_event(call, tr);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
 				   call->name);
@@ -2287,11 +2142,7 @@ __trace_early_add_event_dirs(struct trace_array *tr)
 
 
 	list_for_each_entry(file, &tr->events, list) {
-		ret = event_create_dir(tr->event_dir, file,
-				       &ftrace_event_id_fops,
-				       &ftrace_enable_fops,
-				       &ftrace_event_filter_fops,
-				       &ftrace_event_format_fops);
+		ret = event_create_dir(tr->event_dir, file);
 		if (ret < 0)
 			pr_warning("Could not create directory for event %s\n",
 				   file->event_call->name);
@@ -2332,29 +2183,14 @@ __trace_remove_event_dirs(struct trace_array *tr)
 		remove_event_file_dir(file);
 }
 
-static void
-__add_event_to_tracers(struct ftrace_event_call *call,
-		       struct ftrace_module_file_ops *file_ops)
+static void __add_event_to_tracers(struct ftrace_event_call *call)
 {
 	struct trace_array *tr;
 
-	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
-		if (file_ops)
-			__trace_add_new_mod_event(call, tr, file_ops);
-		else
-			__trace_add_new_event(call, tr,
-					      &ftrace_event_id_fops,
-					      &ftrace_enable_fops,
-					      &ftrace_event_filter_fops,
-					      &ftrace_event_format_fops);
-	}
+	list_for_each_entry(tr, &ftrace_trace_arrays, list)
+		__trace_add_new_event(call, tr);
 }
 
-static struct notifier_block trace_module_nb = {
-	.notifier_call = trace_module_notify,
-	.priority = 0,
-};
-
2358extern struct ftrace_event_call *__start_ftrace_events[]; 2194extern struct ftrace_event_call *__start_ftrace_events[];
2359extern struct ftrace_event_call *__stop_ftrace_events[]; 2195extern struct ftrace_event_call *__stop_ftrace_events[];
2360 2196
@@ -2559,10 +2395,11 @@ static __init int event_trace_init(void)
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_MODULES
 	ret = register_module_notifier(&trace_module_nb);
 	if (ret)
 		pr_warning("Failed to register trace events module notifier\n");
-
+#endif
 	return 0;
 }
 early_initcall(event_trace_memsetup);
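Most of what this file loses existed only for module reference counting: struct file_operations has an .owner field that pins a module while any file bound to those ops is open, and the deleted code kmalloc'ed per-module copies of the four event fops purely to set that field. Now that event files are torn down explicitly when a module goes away (remove_event_file_dir()), the shared static fops work for every event, and the module notifier can live entirely under CONFIG_MODULES, which is why the registration call gains the #ifdef above. For contrast, the conventional owner pattern looks like this (the handlers are hypothetical):

	static const struct file_operations example_fops = {
		.owner	= THIS_MODULE,	/* hold a module ref while open */
		.open	= example_open,
		.read	= example_read,
	};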
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 8fd03657bc7d..559329d9bd2f 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -200,8 +200,8 @@ extern char *__bad_type_size(void);
 	       #type, #name, offsetof(typeof(trace), name),		\
 	       sizeof(trace.name), is_signed_type(type)
 
-static
-int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
+static int __init
+__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 {
 	int i;
 	int pos = 0;
@@ -228,7 +228,7 @@ int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 	return pos;
 }
 
-static int set_syscall_print_fmt(struct ftrace_event_call *call)
+static int __init set_syscall_print_fmt(struct ftrace_event_call *call)
 {
 	char *print_fmt;
 	int len;
@@ -253,7 +253,7 @@ static int set_syscall_print_fmt(struct ftrace_event_call *call)
 	return 0;
 }
 
-static void free_syscall_print_fmt(struct ftrace_event_call *call)
+static void __init free_syscall_print_fmt(struct ftrace_event_call *call)
 {
 	struct syscall_metadata *entry = call->data;
 
@@ -459,7 +459,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 	mutex_unlock(&syscall_trace_lock);
 }
 
-static int init_syscall_trace(struct ftrace_event_call *call)
+static int __init init_syscall_trace(struct ftrace_event_call *call)
 {
 	int id;
 	int num;
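The __init markings are pure boot-memory savings: code in .init.text is discarded once the kernel finishes booting, which is safe here because syscall event setup only runs during early initialization. Shape of the annotation (illustrative):

	#include <linux/init.h>

	static int __init boot_only_helper(void)
	{
		return 0;	/* .init.text: freed after boot, never call late */
	}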