Diffstat (limited to 'kernel')
44 files changed, 542 insertions(+), 451 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 470839d1a30e..35ef1185e359 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y     = fork.o exec_domain.o panic.o printk.o \
+obj-y     = fork.o exec_domain.o panic.o \
 	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
 	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
@@ -24,6 +24,7 @@ endif
 
 obj-y += sched/
 obj-y += power/
+obj-y += printk/
 obj-y += cpu/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0e0b20b8c5db..789ec4683db3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1845,36 +1845,43 @@ out:
 EXPORT_SYMBOL_GPL(cgroup_path);
 
 /**
- * task_cgroup_path_from_hierarchy - cgroup path of a task on a hierarchy
+ * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
  * @task: target task
- * @hierarchy_id: the hierarchy to look up @task's cgroup from
  * @buf: the buffer to write the path into
  * @buflen: the length of the buffer
  *
- * Determine @task's cgroup on the hierarchy specified by @hierarchy_id and
- * copy its path into @buf. This function grabs cgroup_mutex and shouldn't
- * be used inside locks used by cgroup controller callbacks.
+ * Determine @task's cgroup on the first (the one with the lowest non-zero
+ * hierarchy_id) cgroup hierarchy and copy its path into @buf. This
+ * function grabs cgroup_mutex and shouldn't be used inside locks used by
+ * cgroup controller callbacks.
+ *
+ * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short.
 */
-int task_cgroup_path_from_hierarchy(struct task_struct *task, int hierarchy_id,
-				    char *buf, size_t buflen)
+int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
 {
 	struct cgroupfs_root *root;
-	struct cgroup *cgrp = NULL;
-	int ret = -ENOENT;
+	struct cgroup *cgrp;
+	int hierarchy_id = 1, ret = 0;
+
+	if (buflen < 2)
+		return -ENAMETOOLONG;
 
 	mutex_lock(&cgroup_mutex);
 
-	root = idr_find(&cgroup_hierarchy_idr, hierarchy_id);
+	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
+
 	if (root) {
 		cgrp = task_cgroup_from_root(task, root);
 		ret = cgroup_path(cgrp, buf, buflen);
+	} else {
+		/* if no hierarchy exists, everyone is in "/" */
+		memcpy(buf, "/", 2);
 	}
 
 	mutex_unlock(&cgroup_mutex);
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(task_cgroup_path_from_hierarchy);
+EXPORT_SYMBOL_GPL(task_cgroup_path);
 
 /*
  * Control Group taskset
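The rename above also changes the failure contract: -ENOENT disappears, "/" is reported when no hierarchy exists, and only an undersized buffer fails. A minimal usage sketch of the new helper (not part of this commit; the kmalloc/pr_info calls are illustrative):

	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);	/* heap buffer; PATH_MAX is ample */

	if (buf && !task_cgroup_path(current, buf, PATH_MAX))
		pr_info("cgroup path: %s\n", buf);	/* "/" when no hierarchy is mounted */
	kfree(buf);					/* kfree(NULL) is a no-op */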
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 198a38883e64..b2b227b82123 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -366,7 +366,7 @@ EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /* Requires cpu_add_remove_lock to be held */
-static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
+static int _cpu_up(unsigned int cpu, int tasks_frozen)
 {
 	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
@@ -419,7 +419,7 @@ out:
 	return ret;
 }
 
-int __cpuinit cpu_up(unsigned int cpu)
+int cpu_up(unsigned int cpu)
 {
 	int err = 0;
 
@@ -618,7 +618,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
-void __cpuinit notify_cpu_starting(unsigned int cpu)
+void notify_cpu_starting(unsigned int cpu)
 {
 	unsigned long val = CPU_STARTING;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eba8fb5834ae..f86599e8c123 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6234,8 +6234,6 @@ perf_event_mux_interval_ms_store(struct device *dev,
 	return count;
 }
 
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
 static struct device_attribute pmu_dev_attrs[] = {
 	__ATTR_RO(type),
 	__ATTR_RW(perf_event_mux_interval_ms),
@@ -7630,7 +7628,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-static void __cpuinit perf_event_init_cpu(int cpu)
+static void perf_event_init_cpu(int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -7719,7 +7717,7 @@ static struct notifier_block perf_reboot_notifier = {
 	.priority = INT_MIN,
 };
 
-static int __cpuinit
+static int
 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
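The local #define deleted above became redundant once the generic sysfs headers started providing an equivalent __ATTR_RW(); that macro has the same shape as the removed line (assumption: the exact mode spelling in <linux/sysfs.h> may differ, e.g. S_IWUSR | S_IRUGO rather than 0644):

	#define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store)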
diff --git a/kernel/fork.c b/kernel/fork.c
index 66635c80a813..403d2bb8a968 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1546,7 +1546,7 @@ static inline void init_idle_pids(struct pid_link *links)
 	}
 }
 
-struct task_struct * __cpuinit fork_idle(int cpu)
+struct task_struct *fork_idle(int cpu)
 {
 	struct task_struct *task;
 	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 8b2afc1c9df0..b462fa197517 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(freezer_lock);
 */
 bool freezing_slow_path(struct task_struct *p)
 {
-	if (p->flags & PF_NOFREEZE)
+	if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
 		return false;
 
 	if (pm_nosig_freezing || cgroup_freezing(p))
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f0f4fe29cd21..383319bae3f7 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1659,7 +1659,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
 /*
 * Functions related to boot-time initialization:
 */
-static void __cpuinit init_hrtimers_cpu(int cpu)
+static void init_hrtimers_cpu(int cpu)
 {
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
@@ -1740,7 +1740,7 @@ static void migrate_hrtimers(int scpu)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
+static int hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
 	int scpu = (long)hcpu;
@@ -1773,7 +1773,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hrtimers_nb = {
+static struct notifier_block hrtimers_nb = {
 	.notifier_call = hrtimer_cpu_notify,
 };
 
diff --git a/kernel/power/autosleep.c b/kernel/power/autosleep.c
index c6422ffeda9a..9012ecf7b814 100644
--- a/kernel/power/autosleep.c
+++ b/kernel/power/autosleep.c
@@ -32,7 +32,8 @@ static void try_to_suspend(struct work_struct *work)
 
 	mutex_lock(&autosleep_lock);
 
-	if (!pm_save_wakeup_count(initial_count)) {
+	if (!pm_save_wakeup_count(initial_count) ||
+	    system_state != SYSTEM_RUNNING) {
 		mutex_unlock(&autosleep_lock);
 		goto out;
 	}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index fc0df8486449..06ec8869dbf1 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -109,6 +109,8 @@ static int try_to_freeze_tasks(bool user_only)
 
 /**
 * freeze_processes - Signal user space processes to enter the refrigerator.
+ * The current thread will not be frozen. The same process that calls
+ * freeze_processes must later call thaw_processes.
 *
 * On success, returns 0. On failure, -errno and system is fully thawed.
 */
@@ -120,6 +122,9 @@ int freeze_processes(void)
 	if (error)
 		return error;
 
+	/* Make sure this task doesn't get frozen */
+	current->flags |= PF_SUSPEND_TASK;
+
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
@@ -168,6 +173,7 @@ int freeze_kernel_threads(void)
 void thaw_processes(void)
 {
 	struct task_struct *g, *p;
+	struct task_struct *curr = current;
 
 	if (pm_freezing)
 		atomic_dec(&system_freezing_cnt);
@@ -182,10 +188,15 @@ void thaw_processes(void)
 
 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
+		/* No other threads should have PF_SUSPEND_TASK set */
+		WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
 		__thaw_task(p);
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 
+	WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
+	curr->flags &= ~PF_SUSPEND_TASK;
+
 	usermodehelper_enable();
 
 	schedule();
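Together with the freezer.c hunk above, PF_SUSPEND_TASK gives the task that initiates suspend a freeze exemption for the whole freeze/thaw window. A hedged sketch of the resulting flow; do_suspend() is a hypothetical caller, not code from this commit:

	static int do_suspend(void)
	{
		int error;

		error = freeze_processes();	/* sets PF_SUSPEND_TASK on current */
		if (error)
			return error;
		/* ... enter the sleep state; current must stay runnable ... */
		thaw_processes();		/* clears PF_SUSPEND_TASK again */
		return 0;
	}

Since freezing_slow_path() now tests PF_SUSPEND_TASK next to PF_NOFREEZE, the initiating task can never be caught by the freeze it started, and the WARN_ON calls in thaw_processes() verify the flag lives on exactly one task.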
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
new file mode 100644
index 000000000000..85405bdcf2b3
--- /dev/null
+++ b/kernel/printk/Makefile
@@ -0,0 +1,2 @@
+obj-y = printk.o
+obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
new file mode 100644
index 000000000000..b51087fb9ace
--- /dev/null
+++ b/kernel/printk/braille.c
@@ -0,0 +1,48 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/string.h>
+
+#include "console_cmdline.h"
+#include "braille.h"
+
+char *_braille_console_setup(char **str, char **brl_options)
+{
+	if (!memcmp(*str, "brl,", 4)) {
+		*brl_options = "";
+		*str += 4;
+	} else if (!memcmp(*str, "brl=", 4)) {
+		*brl_options = *str + 4;
+		*str = strchr(*brl_options, ',');
+		if (!*str)
+			pr_err("need port name after brl=\n");
+		else
+			*((*str)++) = 0;
+	}
+
+	return *str;
+}
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+	int rtn = 0;
+
+	if (c->brl_options) {
+		console->flags |= CON_BRL;
+		rtn = braille_register_console(console, c->index, c->options,
+					       c->brl_options);
+	}
+
+	return rtn;
+}
+
+int
+_braille_unregister_console(struct console *console)
+{
+	if (console->flags & CON_BRL)
+		return braille_unregister_console(console);
+
+	return 0;
+}
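For reference, the two command-line forms the parser above accepts (values are examples only):

	console=brl,ttyS0	-> *brl_options = "",     *str left at "ttyS0"
	console=brl=usb0,ttyS0	-> *brl_options = "usb0", *str left at "ttyS0"

Either way the remainder of the string goes back to console_setup() for the usual name/index/options decoding.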
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
new file mode 100644
index 000000000000..769d771145c8
--- /dev/null
+++ b/kernel/printk/braille.h
@@ -0,0 +1,48 @@
+#ifndef _PRINTK_BRAILLE_H
+#define _PRINTK_BRAILLE_H
+
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+	c->brl_options = brl_options;
+}
+
+char *
+_braille_console_setup(char **str, char **brl_options);
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c);
+
+int
+_braille_unregister_console(struct console *console);
+
+#else
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+}
+
+static inline char *
+_braille_console_setup(char **str, char **brl_options)
+{
+	return NULL;
+}
+
+static inline int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+	return 0;
+}
+
+static inline int
+_braille_unregister_console(struct console *console)
+{
+	return 0;
+}
+
+#endif
+
+#endif
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
new file mode 100644
index 000000000000..cbd69d842341
--- /dev/null
+++ b/kernel/printk/console_cmdline.h
@@ -0,0 +1,14 @@
+#ifndef _CONSOLE_CMDLINE_H
+#define _CONSOLE_CMDLINE_H
+
+struct console_cmdline
+{
+	char	name[8];		/* Name of the driver */
+	int	index;			/* Minor dev. to use */
+	char	*options;		/* Options for the driver */
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+	char	*brl_options;		/* Options for braille driver */
+#endif
+};
+
+#endif
diff --git a/kernel/printk.c b/kernel/printk/printk.c
index d37d45c90ae6..5b5a7080e2a5 100644
--- a/kernel/printk.c
+++ b/kernel/printk/printk.c
@@ -51,6 +51,9 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/printk.h>
 
+#include "console_cmdline.h"
+#include "braille.h"
+
 /* printk's without a loglevel use this.. */
 #define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
 
@@ -105,19 +108,11 @@ static struct console *exclusive_console;
 /*
 * Array of consoles built from command line options (console=)
 */
-struct console_cmdline
-{
-	char	name[8];		/* Name of the driver */
-	int	index;			/* Minor dev. to use */
-	char	*options;		/* Options for the driver */
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
-	char	*brl_options;		/* Options for braille driver */
-#endif
-};
 
 #define MAX_CMDLINECONSOLES 8
 
 static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
+
 static int selected_console = -1;
 static int preferred_console = -1;
 int console_set_on_cmdline;
@@ -178,7 +173,7 @@ static int console_may_schedule;
 *   67                           "g"
 *   0032     00 00 00            padding to next message header
 *
- * The 'struct log' buffer header must never be directly exported to
+ * The 'struct printk_log' buffer header must never be directly exported to
 * userspace, it is a kernel-private implementation detail that might
 * need to be changed in the future, when the requirements change.
 *
@@ -200,7 +195,7 @@ enum log_flags {
 	LOG_CONT	= 8,	/* text is a fragment of a continuation line */
 };
 
-struct log {
+struct printk_log {
 	u64 ts_nsec;		/* timestamp in nanoseconds */
 	u16 len;		/* length of entire record */
 	u16 text_len;		/* length of text buffer */
@@ -248,7 +243,7 @@ static u32 clear_idx;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
 #define LOG_ALIGN 4
 #else
-#define LOG_ALIGN __alignof__(struct log)
+#define LOG_ALIGN __alignof__(struct printk_log)
 #endif
 #define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
 static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -259,35 +254,35 @@ static u32 log_buf_len = __LOG_BUF_LEN;
 static volatile unsigned int logbuf_cpu = UINT_MAX;
 
 /* human readable text of the record */
-static char *log_text(const struct log *msg)
+static char *log_text(const struct printk_log *msg)
 {
-	return (char *)msg + sizeof(struct log);
+	return (char *)msg + sizeof(struct printk_log);
 }
 
 /* optional key/value pair dictionary attached to the record */
-static char *log_dict(const struct log *msg)
+static char *log_dict(const struct printk_log *msg)
 {
-	return (char *)msg + sizeof(struct log) + msg->text_len;
+	return (char *)msg + sizeof(struct printk_log) + msg->text_len;
 }
 
 /* get record by index; idx must point to valid msg */
-static struct log *log_from_idx(u32 idx)
+static struct printk_log *log_from_idx(u32 idx)
 {
-	struct log *msg = (struct log *)(log_buf + idx);
+	struct printk_log *msg = (struct printk_log *)(log_buf + idx);
 
 	/*
 	 * A length == 0 record is the end of buffer marker. Wrap around and
 	 * read the message at the start of the buffer.
 	 */
 	if (!msg->len)
-		return (struct log *)log_buf;
+		return (struct printk_log *)log_buf;
 	return msg;
 }
 
 /* get next record; idx must point to valid msg */
 static u32 log_next(u32 idx)
 {
-	struct log *msg = (struct log *)(log_buf + idx);
+	struct printk_log *msg = (struct printk_log *)(log_buf + idx);
 
 	/* length == 0 indicates the end of the buffer; wrap */
 	/*
@@ -296,7 +291,7 @@ static u32 log_next(u32 idx)
 	 * return the one after that.
 	 */
 	if (!msg->len) {
-		msg = (struct log *)log_buf;
+		msg = (struct printk_log *)log_buf;
 		return msg->len;
 	}
 	return idx + msg->len;
@@ -308,11 +303,11 @@ static void log_store(int facility, int level,
 		      const char *dict, u16 dict_len,
 		      const char *text, u16 text_len)
 {
-	struct log *msg;
+	struct printk_log *msg;
 	u32 size, pad_len;
 
 	/* number of '\0' padding bytes to next message */
-	size = sizeof(struct log) + text_len + dict_len;
+	size = sizeof(struct printk_log) + text_len + dict_len;
 	pad_len = (-size) & (LOG_ALIGN - 1);
 	size += pad_len;
 
@@ -324,7 +319,7 @@ static void log_store(int facility, int level,
 		else
 			free = log_first_idx - log_next_idx;
 
-		if (free > size + sizeof(struct log))
+		if (free > size + sizeof(struct printk_log))
 			break;
 
 		/* drop old messages until we have enough contiguous space */
@@ -332,18 +327,18 @@ static void log_store(int facility, int level,
 		log_first_seq++;
 	}
 
-	if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
+	if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) {
 		/*
 		 * This message + an additional empty header does not fit
 		 * at the end of the buffer. Add an empty header with len == 0
 		 * to signify a wrap around.
 		 */
-		memset(log_buf + log_next_idx, 0, sizeof(struct log));
+		memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
 		log_next_idx = 0;
 	}
 
 	/* fill message */
-	msg = (struct log *)(log_buf + log_next_idx);
+	msg = (struct printk_log *)(log_buf + log_next_idx);
 	memcpy(log_text(msg), text, text_len);
 	msg->text_len = text_len;
 	memcpy(log_dict(msg), dict, dict_len);
@@ -356,7 +351,7 @@ static void log_store(int facility, int level,
 	else
 		msg->ts_nsec = local_clock();
 	memset(log_dict(msg) + dict_len, 0, pad_len);
-	msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
+	msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len;
 
 	/* insert message */
 	log_next_idx += msg->len;
@@ -479,7 +474,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
 	struct devkmsg_user *user = file->private_data;
-	struct log *msg;
+	struct printk_log *msg;
 	u64 ts_usec;
 	size_t i;
 	char cont = '-';
@@ -724,14 +719,14 @@ void log_buf_kexec_setup(void)
 	VMCOREINFO_SYMBOL(log_first_idx);
 	VMCOREINFO_SYMBOL(log_next_idx);
 	/*
-	 * Export struct log size and field offsets. User space tools can
+	 * Export struct printk_log size and field offsets. User space tools can
 	 * parse it and detect any changes to structure down the line.
 	 */
-	VMCOREINFO_STRUCT_SIZE(log);
-	VMCOREINFO_OFFSET(log, ts_nsec);
-	VMCOREINFO_OFFSET(log, len);
-	VMCOREINFO_OFFSET(log, text_len);
-	VMCOREINFO_OFFSET(log, dict_len);
+	VMCOREINFO_STRUCT_SIZE(printk_log);
+	VMCOREINFO_OFFSET(printk_log, ts_nsec);
+	VMCOREINFO_OFFSET(printk_log, len);
+	VMCOREINFO_OFFSET(printk_log, text_len);
+	VMCOREINFO_OFFSET(printk_log, dict_len);
 }
 #endif
 
@@ -884,7 +879,7 @@ static size_t print_time(u64 ts, char *buf)
 		       (unsigned long)ts, rem_nsec / 1000);
 }
 
-static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
+static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
 {
 	size_t len = 0;
 	unsigned int prefix = (msg->facility << 3) | msg->level;
@@ -907,7 +902,7 @@ static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
 	return len;
 }
 
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
 			     bool syslog, char *buf, size_t size)
 {
 	const char *text = log_text(msg);
@@ -969,7 +964,7 @@ static size_t msg_print_text(const struct log *msg, enum log_flags prev,
 static int syslog_print(char __user *buf, int size)
 {
 	char *text;
-	struct log *msg;
+	struct printk_log *msg;
 	int len = 0;
 
 	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
@@ -1060,7 +1055,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		idx = clear_idx;
 		prev = 0;
 		while (seq < log_next_seq) {
-			struct log *msg = log_from_idx(idx);
+			struct printk_log *msg = log_from_idx(idx);
 
 			len += msg_print_text(msg, prev, true, NULL, 0);
 			prev = msg->flags;
@@ -1073,7 +1068,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		idx = clear_idx;
 		prev = 0;
 		while (len > size && seq < log_next_seq) {
-			struct log *msg = log_from_idx(idx);
+			struct printk_log *msg = log_from_idx(idx);
 
 			len -= msg_print_text(msg, prev, true, NULL, 0);
 			prev = msg->flags;
@@ -1087,7 +1082,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		len = 0;
 		prev = 0;
 		while (len >= 0 && seq < next_seq) {
-			struct log *msg = log_from_idx(idx);
+			struct printk_log *msg = log_from_idx(idx);
 			int textlen;
 
 			textlen = msg_print_text(msg, prev, true, text,
@@ -1233,7 +1228,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 
 		error = 0;
 		while (seq < log_next_seq) {
-			struct log *msg = log_from_idx(idx);
+			struct printk_log *msg = log_from_idx(idx);
 
 			error += msg_print_text(msg, prev, true, NULL, 0);
 			idx = log_next(idx);
@@ -1719,10 +1714,10 @@ static struct cont {
 	u8 level;
 	bool flushed:1;
 } cont;
-static struct log *log_from_idx(u32 idx) { return NULL; }
+static struct printk_log *log_from_idx(u32 idx) { return NULL; }
 static u32 log_next(u32 idx) { return 0; }
 static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
 			     bool syslog, char *buf, size_t size) { return 0; }
 static size_t cont_print_text(char *text, size_t size) { return 0; }
 
@@ -1761,23 +1756,23 @@ static int __add_preferred_console(char *name, int idx, char *options,
 	 *	See if this tty is not yet registered, and
 	 *	if we have a slot free.
 	 */
-	for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
-		if (strcmp(console_cmdline[i].name, name) == 0 &&
-			  console_cmdline[i].index == idx) {
-				if (!brl_options)
-					selected_console = i;
-				return 0;
+	for (i = 0, c = console_cmdline;
+	     i < MAX_CMDLINECONSOLES && c->name[0];
+	     i++, c++) {
+		if (strcmp(c->name, name) == 0 && c->index == idx) {
+			if (!brl_options)
+				selected_console = i;
+			return 0;
 		}
+	}
 	if (i == MAX_CMDLINECONSOLES)
 		return -E2BIG;
 	if (!brl_options)
 		selected_console = i;
-	c = &console_cmdline[i];
 	strlcpy(c->name, name, sizeof(c->name));
 	c->options = options;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
-	c->brl_options = brl_options;
-#endif
+	braille_set_options(c, brl_options);
+
 	c->index = idx;
 	return 0;
 }
@@ -1790,20 +1785,8 @@ static int __init console_setup(char *str)
 	char *s, *options, *brl_options = NULL;
 	int idx;
 
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
-	if (!memcmp(str, "brl,", 4)) {
-		brl_options = "";
-		str += 4;
-	} else if (!memcmp(str, "brl=", 4)) {
-		brl_options = str + 4;
-		str = strchr(brl_options, ',');
-		if (!str) {
-			printk(KERN_ERR "need port name after brl=\n");
-			return 1;
-		}
-		*(str++) = 0;
-	}
-#endif
+	if (_braille_console_setup(&str, &brl_options))
+		return 1;
 
 	/*
 	 * Decode str into name, index, options.
@@ -1858,15 +1841,15 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
 	struct console_cmdline *c;
 	int i;
 
-	for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
-		if (strcmp(console_cmdline[i].name, name) == 0 &&
-			  console_cmdline[i].index == idx) {
-				c = &console_cmdline[i];
-				strlcpy(c->name, name_new, sizeof(c->name));
-				c->name[sizeof(c->name) - 1] = 0;
-				c->options = options;
-				c->index = idx_new;
-				return i;
+	for (i = 0, c = console_cmdline;
+	     i < MAX_CMDLINECONSOLES && c->name[0];
+	     i++, c++)
+		if (strcmp(c->name, name) == 0 && c->index == idx) {
+			strlcpy(c->name, name_new, sizeof(c->name));
+			c->name[sizeof(c->name) - 1] = 0;
+			c->options = options;
+			c->index = idx_new;
+			return i;
 		}
 	/* not found */
 	return -1;
@@ -1921,7 +1904,7 @@ void resume_console(void)
 * called when a new CPU comes online (or fails to come up), and ensures
 * that any such output gets printed.
 */
-static int __cpuinit console_cpu_notify(struct notifier_block *self,
+static int console_cpu_notify(struct notifier_block *self,
 	unsigned long action, void *hcpu)
 {
 	switch (action) {
@@ -2046,7 +2029,7 @@ void console_unlock(void)
 	console_cont_flush(text, sizeof(text));
 again:
 	for (;;) {
-		struct log *msg;
+		struct printk_log *msg;
 		size_t len;
 		int level;
 
@@ -2241,6 +2224,7 @@ void register_console(struct console *newcon)
 	int i;
 	unsigned long flags;
 	struct console *bcon = NULL;
+	struct console_cmdline *c;
 
 	/*
 	 * before we register a new CON_BOOT console, make sure we don't
@@ -2288,30 +2272,25 @@ void register_console(struct console *newcon)
 	 *	See if this console matches one we selected on
 	 *	the command line.
 	 */
-	for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
-	     i++) {
-		if (strcmp(console_cmdline[i].name, newcon->name) != 0)
+	for (i = 0, c = console_cmdline;
+	     i < MAX_CMDLINECONSOLES && c->name[0];
+	     i++, c++) {
+		if (strcmp(c->name, newcon->name) != 0)
 			continue;
 		if (newcon->index >= 0 &&
-		    newcon->index != console_cmdline[i].index)
+		    newcon->index != c->index)
 			continue;
 		if (newcon->index < 0)
-			newcon->index = console_cmdline[i].index;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
-		if (console_cmdline[i].brl_options) {
-			newcon->flags |= CON_BRL;
-			braille_register_console(newcon,
-					console_cmdline[i].index,
-					console_cmdline[i].options,
-					console_cmdline[i].brl_options);
+			newcon->index = c->index;
+
+		if (_braille_register_console(newcon, c))
 			return;
-		}
-#endif
+
 		if (newcon->setup &&
 		    newcon->setup(newcon, console_cmdline[i].options) != 0)
 			break;
 		newcon->flags |= CON_ENABLED;
-		newcon->index = console_cmdline[i].index;
+		newcon->index = c->index;
 		if (i == selected_console) {
 			newcon->flags |= CON_CONSDEV;
 			preferred_console = selected_console;
@@ -2394,13 +2373,13 @@ EXPORT_SYMBOL(register_console);
 int unregister_console(struct console *console)
 {
 	struct console *a, *b;
-	int res = 1;
+	int res;
 
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
-	if (console->flags & CON_BRL)
-		return braille_unregister_console(console);
-#endif
+	res = _braille_unregister_console(console);
+	if (res)
+		return res;
 
+	res = 1;
 	console_lock();
 	if (console_drivers == console) {
 		console_drivers=console->next;
@@ -2666,7 +2645,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
 			       char *line, size_t size, size_t *len)
 {
-	struct log *msg;
+	struct printk_log *msg;
 	size_t l = 0;
 	bool ret = false;
 
@@ -2778,7 +2757,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	idx = dumper->cur_idx;
 	prev = 0;
 	while (seq < dumper->next_seq) {
-		struct log *msg = log_from_idx(idx);
+		struct printk_log *msg = log_from_idx(idx);
 
 		l += msg_print_text(msg, prev, true, NULL, 0);
 		idx = log_next(idx);
@@ -2791,7 +2770,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	idx = dumper->cur_idx;
 	prev = 0;
 	while (l > size && seq < dumper->next_seq) {
-		struct log *msg = log_from_idx(idx);
+		struct printk_log *msg = log_from_idx(idx);
 
 		l -= msg_print_text(msg, prev, true, NULL, 0);
 		idx = log_next(idx);
@@ -2806,7 +2785,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 	l = 0;
 	prev = 0;
 	while (seq < dumper->next_seq) {
-		struct log *msg = log_from_idx(idx);
+		struct printk_log *msg = log_from_idx(idx);
 
 		l += msg_print_text(msg, prev, syslog, buf + l, size - l);
 		idx = log_next(idx);
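The log -> printk_log rename above is mechanical, but it reads more easily with the record layout in mind: each record is a printk_log header followed directly by its text and dict payloads, padded so the next header stays LOG_ALIGN-aligned, and a header with len == 0 marks the wrap point. A condensed reader-side sketch, paraphrasing log_from_idx()/log_text()/log_dict() above rather than adding new kernel code:

	struct printk_log *msg = (struct printk_log *)(log_buf + idx);

	if (!msg->len)					/* len == 0: end-of-buffer marker */
		msg = (struct printk_log *)log_buf;	/* wrap to the start */

	char *text = (char *)msg + sizeof(struct printk_log);	/* payloads follow the header */
	char *dict = text + msg->text_len;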
diff --git a/kernel/profile.c b/kernel/profile.c
index 0bf400737660..6631e1ef55ab 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -331,7 +331,7 @@ out:
 	put_cpu();
 }
 
-static int __cpuinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b1fa5510388d..f4871e52c546 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1476,7 +1476,7 @@ rcu_torture_shutdown(void *arg)
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
-static int __cpuinit
+static int
 rcu_torture_onoff(void *arg)
 {
 	int cpu;
@@ -1558,7 +1558,7 @@ rcu_torture_onoff(void *arg)
 	return 0;
 }
 
-static int __cpuinit
+static int
 rcu_torture_onoff_init(void)
 {
 	int ret;
@@ -1601,7 +1601,7 @@ static void rcu_torture_onoff_cleanup(void)
 * CPU-stall kthread.  It waits as specified by stall_cpu_holdoff, then
 * induces a CPU stall for the time specified by stall_cpu.
 */
-static int __cpuinit rcu_torture_stall(void *args)
+static int rcu_torture_stall(void *args)
 {
 	unsigned long stop_at;
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e08abb9461ac..068de3a93606 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2910,7 +2910,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
-static void __cpuinit
+static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
 	unsigned long flags;
@@ -2962,7 +2962,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	mutex_unlock(&rsp->onoff_mutex);
 }
 
-static void __cpuinit rcu_prepare_cpu(int cpu)
+static void rcu_prepare_cpu(int cpu)
 {
 	struct rcu_state *rsp;
 
@@ -2974,7 +2974,7 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
 /*
 * Handle CPU online/offline notification events.
 */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4a39d364493c..b3832581043c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -521,10 +521,10 @@ static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
-static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
 static void rcu_idle_count_callbacks_posted(void);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 63098a59216e..769e12e3151b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1352,7 +1352,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
 * already exist.  We only create this kthread for preemptible RCU.
 * Returns zero if all is well, a negated errno otherwise.
 */
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp)
 {
 	int rnp_index = rnp - &rsp->node[0];
@@ -1507,7 +1507,7 @@ static int __init rcu_spawn_kthreads(void)
 }
 early_initcall(rcu_spawn_kthreads);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
@@ -1549,7 +1549,7 @@ static int __init rcu_scheduler_really_started(void)
 }
 early_initcall(rcu_scheduler_really_started);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 }
 
diff --git a/kernel/relay.c b/kernel/relay.c
index b91488ba2e5a..5001c9887db1 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -516,7 +516,7 @@ static void setup_callbacks(struct rchan *chan,
 *
 *	Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
 */
-static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
+static int relay_hotcpu_callback(struct notifier_block *nb,
 				  unsigned long action,
 				  void *hcpu)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d8eb4525e76..b7c32cb7bfeb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4133,7 +4133,7 @@ void show_state_filter(unsigned long state_filter)
 		debug_show_all_locks();
 }
 
-void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+void init_idle_bootup_task(struct task_struct *idle)
 {
 	idle->sched_class = &idle_sched_class;
 }
@@ -4146,7 +4146,7 @@ void __cpuinit init_idle_bootup_task(struct task_struct *idle)
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
-void __cpuinit init_idle(struct task_struct *idle, int cpu)
+void init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -4630,7 +4630,7 @@ static void set_rq_offline(struct rq *rq)
 * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
-static int __cpuinit
+static int
 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
@@ -4684,12 +4684,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 * happens before everything else.  This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
-static struct notifier_block __cpuinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
 	.notifier_call = migration_call,
 	.priority = CPU_PRI_MIGRATION,
 };
 
-static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+static int sched_cpu_active(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -4702,7 +4702,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
 	}
 }
 
-static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+static int sched_cpu_inactive(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f77f9c527449..9565645e3202 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -851,7 +851,7 @@ void task_numa_fault(int node, int pages, bool migrated)
 {
 	struct task_struct *p = current;
 
-	if (!sched_feat_numa(NUMA))
+	if (!numabalancing_enabled)
 		return;
 
 	/* FIXME: Allocate task-specific structure for placement policy here */
@@ -5506,7 +5506,7 @@ void nohz_balance_enter_idle(int cpu)
 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 
-static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+static int sched_ilb_notifier(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -5786,7 +5786,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
 		entity_tick(cfs_rq, se, queued);
 	}
 
-	if (sched_feat_numa(NUMA))
+	if (numabalancing_enabled)
 		task_tick_numa(rq, curr);
 
 	update_rq_runnable_avg(rq, 1);
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7b72ad..fe9f773d7114 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+static struct notifier_block hotplug_cfd_notifier = {
 	.notifier_call = hotplug_cfd,
 };
 
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 02fc5c933673..eb89e1807408 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -24,7 +24,7 @@
 */
 static DEFINE_PER_CPU(struct task_struct *, idle_threads);
 
-struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
+struct task_struct *idle_thread_get(unsigned int cpu)
 {
 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
 
diff --git a/kernel/softirq.c b/kernel/softirq.c index ca25e6e704a2..be3d3514c325 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -699,7 +699,7 @@ void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq) | |||
699 | } | 699 | } |
700 | EXPORT_SYMBOL(send_remote_softirq); | 700 | EXPORT_SYMBOL(send_remote_softirq); |
701 | 701 | ||
702 | static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, | 702 | static int remote_softirq_cpu_notify(struct notifier_block *self, |
703 | unsigned long action, void *hcpu) | 703 | unsigned long action, void *hcpu) |
704 | { | 704 | { |
705 | /* | 705 | /* |
@@ -728,7 +728,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, | |||
728 | return NOTIFY_OK; | 728 | return NOTIFY_OK; |
729 | } | 729 | } |
730 | 730 | ||
731 | static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = { | 731 | static struct notifier_block remote_softirq_cpu_notifier = { |
732 | .notifier_call = remote_softirq_cpu_notify, | 732 | .notifier_call = remote_softirq_cpu_notify, |
733 | }; | 733 | }; |
734 | 734 | ||
@@ -830,7 +830,7 @@ static void takeover_tasklets(unsigned int cpu) | |||
830 | } | 830 | } |
831 | #endif /* CONFIG_HOTPLUG_CPU */ | 831 | #endif /* CONFIG_HOTPLUG_CPU */ |
832 | 832 | ||
833 | static int __cpuinit cpu_callback(struct notifier_block *nfb, | 833 | static int cpu_callback(struct notifier_block *nfb, |
834 | unsigned long action, | 834 | unsigned long action, |
835 | void *hcpu) | 835 | void *hcpu) |
836 | { | 836 | { |
@@ -845,7 +845,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, | |||
845 | return NOTIFY_OK; | 845 | return NOTIFY_OK; |
846 | } | 846 | } |
847 | 847 | ||
848 | static struct notifier_block __cpuinitdata cpu_nfb = { | 848 | static struct notifier_block cpu_nfb = { |
849 | .notifier_call = cpu_callback | 849 | .notifier_call = cpu_callback |
850 | }; | 850 | }; |
851 | 851 | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index ac09d98490aa..07f6fc468e17 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -2346,7 +2346,11 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp, | |||
2346 | int write, void *data) | 2346 | int write, void *data) |
2347 | { | 2347 | { |
2348 | if (write) { | 2348 | if (write) { |
2349 | *valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp); | 2349 | unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp); |
2350 | |||
2351 | if (jif > INT_MAX) | ||
2352 | return 1; | ||
2353 | *valp = (int)jif; | ||
2350 | } else { | 2354 | } else { |
2351 | int val = *valp; | 2355 | int val = *valp; |
2352 | unsigned long lval; | 2356 | unsigned long lval; |
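Editor's note: the sysctl fix closes an overflow. msecs_to_jiffies() returns unsigned long, so on 64-bit a huge millisecond value can exceed INT_MAX, and blindly storing it into the int-sized *valp would truncate (possibly to a negative timeout). A nonzero return from the conversion callback tells the proc_dointvec machinery to reject the write instead of storing garbage. The guard in isolation, as a sketch:

	#include <linux/jiffies.h>
	#include <linux/kernel.h>	/* INT_MAX */

	/* Sketch: bounds-check before narrowing to int. */
	static int ms_to_int_jiffies(bool neg, unsigned long lval, int *valp)
	{
		unsigned long jif = msecs_to_jiffies(neg ? -lval : lval);

		if (jif > INT_MAX)
			return 1;	/* too large for the int sysctl slot */
		*valp = (int)jif;
		return 0;
	}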
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 69601726a745..e77edc97e036 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -298,7 +298,7 @@ static int __init tick_nohz_full_setup(char *str) | |||
298 | } | 298 | } |
299 | __setup("nohz_full=", tick_nohz_full_setup); | 299 | __setup("nohz_full=", tick_nohz_full_setup); |
300 | 300 | ||
301 | static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb, | 301 | static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, |
302 | unsigned long action, | 302 | unsigned long action, |
303 | void *hcpu) | 303 | void *hcpu) |
304 | { | 304 | { |
@@ -827,13 +827,10 @@ void tick_nohz_irq_exit(void) | |||
827 | { | 827 | { |
828 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); | 828 | struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched); |
829 | 829 | ||
830 | if (ts->inidle) { | 830 | if (ts->inidle) |
831 | /* Cancel the timer because CPU already waken up from the C-states*/ | ||
832 | menu_hrtimer_cancel(); | ||
833 | __tick_nohz_idle_enter(ts); | 831 | __tick_nohz_idle_enter(ts); |
834 | } else { | 832 | else |
835 | tick_nohz_full_stop_tick(ts); | 833 | tick_nohz_full_stop_tick(ts); |
836 | } | ||
837 | } | 834 | } |
838 | 835 | ||
839 | /** | 836 | /** |
@@ -931,8 +928,6 @@ void tick_nohz_idle_exit(void) | |||
931 | 928 | ||
932 | ts->inidle = 0; | 929 | ts->inidle = 0; |
933 | 930 | ||
934 | /* Cancel the timer because CPU already waken up from the C-states*/ | ||
935 | menu_hrtimer_cancel(); | ||
936 | if (ts->idle_active || ts->tick_stopped) | 931 | if (ts->idle_active || ts->tick_stopped) |
937 | now = ktime_get(); | 932 | now = ktime_get(); |
938 | 933 | ||
diff --git a/kernel/timer.c b/kernel/timer.c index 15bc1b41021d..4296d13db3d1 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1505,11 +1505,11 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout) | |||
1505 | } | 1505 | } |
1506 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | 1506 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); |
1507 | 1507 | ||
1508 | static int __cpuinit init_timers_cpu(int cpu) | 1508 | static int init_timers_cpu(int cpu) |
1509 | { | 1509 | { |
1510 | int j; | 1510 | int j; |
1511 | struct tvec_base *base; | 1511 | struct tvec_base *base; |
1512 | static char __cpuinitdata tvec_base_done[NR_CPUS]; | 1512 | static char tvec_base_done[NR_CPUS]; |
1513 | 1513 | ||
1514 | if (!tvec_base_done[cpu]) { | 1514 | if (!tvec_base_done[cpu]) { |
1515 | static char boot_done; | 1515 | static char boot_done; |
@@ -1577,7 +1577,7 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea | |||
1577 | } | 1577 | } |
1578 | } | 1578 | } |
1579 | 1579 | ||
1580 | static void __cpuinit migrate_timers(int cpu) | 1580 | static void migrate_timers(int cpu) |
1581 | { | 1581 | { |
1582 | struct tvec_base *old_base; | 1582 | struct tvec_base *old_base; |
1583 | struct tvec_base *new_base; | 1583 | struct tvec_base *new_base; |
@@ -1610,7 +1610,7 @@ static void __cpuinit migrate_timers(int cpu) | |||
1610 | } | 1610 | } |
1611 | #endif /* CONFIG_HOTPLUG_CPU */ | 1611 | #endif /* CONFIG_HOTPLUG_CPU */ |
1612 | 1612 | ||
1613 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, | 1613 | static int timer_cpu_notify(struct notifier_block *self, |
1614 | unsigned long action, void *hcpu) | 1614 | unsigned long action, void *hcpu) |
1615 | { | 1615 | { |
1616 | long cpu = (long)hcpu; | 1616 | long cpu = (long)hcpu; |
@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self, | |||
1635 | return NOTIFY_OK; | 1635 | return NOTIFY_OK; |
1636 | } | 1636 | } |
1637 | 1637 | ||
1638 | static struct notifier_block __cpuinitdata timers_nb = { | 1638 | static struct notifier_block timers_nb = { |
1639 | .notifier_call = timer_cpu_notify, | 1639 | .notifier_call = timer_cpu_notify, |
1640 | }; | 1640 | }; |
1641 | 1641 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 67708f46baae..8ce9eefc5bb4 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, | |||
1441 | * the hashes are freed with call_rcu_sched(). | 1441 | * the hashes are freed with call_rcu_sched(). |
1442 | */ | 1442 | */ |
1443 | static int | 1443 | static int |
1444 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | 1444 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
1445 | { | 1445 | { |
1446 | struct ftrace_hash *filter_hash; | 1446 | struct ftrace_hash *filter_hash; |
1447 | struct ftrace_hash *notrace_hash; | 1447 | struct ftrace_hash *notrace_hash; |
1448 | int ret; | 1448 | int ret; |
1449 | 1449 | ||
1450 | #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS | ||
1451 | /* | ||
1452 | * There's a small race when adding ops that the ftrace handler | ||
1453 | * that wants regs, may be called without them. We can not | ||
1454 | * allow that handler to be called if regs is NULL. | ||
1455 | */ | ||
1456 | if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS)) | ||
1457 | return 0; | ||
1458 | #endif | ||
1459 | |||
1450 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); | 1460 | filter_hash = rcu_dereference_raw_notrace(ops->filter_hash); |
1451 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); | 1461 | notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash); |
1452 | 1462 | ||
@@ -4218,7 +4228,7 @@ static inline void ftrace_startup_enable(int command) { } | |||
4218 | # define ftrace_shutdown_sysctl() do { } while (0) | 4228 | # define ftrace_shutdown_sysctl() do { } while (0) |
4219 | 4229 | ||
4220 | static inline int | 4230 | static inline int |
4221 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) | 4231 | ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) |
4222 | { | 4232 | { |
4223 | return 1; | 4233 | return 1; |
4224 | } | 4234 | } |
@@ -4241,7 +4251,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, | |||
4241 | do_for_each_ftrace_op(op, ftrace_control_list) { | 4251 | do_for_each_ftrace_op(op, ftrace_control_list) { |
4242 | if (!(op->flags & FTRACE_OPS_FL_STUB) && | 4252 | if (!(op->flags & FTRACE_OPS_FL_STUB) && |
4243 | !ftrace_function_local_disabled(op) && | 4253 | !ftrace_function_local_disabled(op) && |
4244 | ftrace_ops_test(op, ip)) | 4254 | ftrace_ops_test(op, ip, regs)) |
4245 | op->func(ip, parent_ip, op, regs); | 4255 | op->func(ip, parent_ip, op, regs); |
4246 | } while_for_each_ftrace_op(op); | 4256 | } while_for_each_ftrace_op(op); |
4247 | trace_recursion_clear(TRACE_CONTROL_BIT); | 4257 | trace_recursion_clear(TRACE_CONTROL_BIT); |
@@ -4274,7 +4284,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, | |||
4274 | */ | 4284 | */ |
4275 | preempt_disable_notrace(); | 4285 | preempt_disable_notrace(); |
4276 | do_for_each_ftrace_op(op, ftrace_ops_list) { | 4286 | do_for_each_ftrace_op(op, ftrace_ops_list) { |
4277 | if (ftrace_ops_test(op, ip)) | 4287 | if (ftrace_ops_test(op, ip, regs)) |
4278 | op->func(ip, parent_ip, op, regs); | 4288 | op->func(ip, parent_ip, op, regs); |
4279 | } while_for_each_ftrace_op(op); | 4289 | } while_for_each_ftrace_op(op); |
4280 | preempt_enable_notrace(); | 4290 | preempt_enable_notrace(); |
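Editor's note: ftrace_ops_test() now sees the regs pointer so it can veto a handler that was registered with FTRACE_OPS_FL_SAVE_REGS but is being reached, during the registration race window, through a trampoline that did not save registers. The guard, extracted as a sketch:

	/* Sketch: never run a regs-demanding ops without saved regs. */
	static bool ops_regs_ok(struct ftrace_ops *ops, void *regs)
	{
	#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
		if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
			return false;	/* small add-time race: skip this ops */
	#endif
		return true;		/* hash filtering proceeds as before */
	}

Both callers (the control-ops loop and __ftrace_ops_list_func) simply pass through the regs they were handed, so the check costs one branch on the traced path.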
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index e444ff88f0a4..cc2f66f68dc5 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -36,11 +36,11 @@ int ring_buffer_print_entry_header(struct trace_seq *s) | |||
36 | { | 36 | { |
37 | int ret; | 37 | int ret; |
38 | 38 | ||
39 | ret = trace_seq_printf(s, "# compressed entry header\n"); | 39 | ret = trace_seq_puts(s, "# compressed entry header\n"); |
40 | ret = trace_seq_printf(s, "\ttype_len : 5 bits\n"); | 40 | ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); |
41 | ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n"); | 41 | ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); |
42 | ret = trace_seq_printf(s, "\tarray : 32 bits\n"); | 42 | ret = trace_seq_puts(s, "\tarray : 32 bits\n"); |
43 | ret = trace_seq_printf(s, "\n"); | 43 | ret = trace_seq_putc(s, '\n'); |
44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", | 44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", |
45 | RINGBUF_TYPE_PADDING); | 45 | RINGBUF_TYPE_PADDING); |
46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", | 46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", |
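Editor's note: the trace_seq conversions are micro-optimizations with a correctness angle — a constant string passed to trace_seq_printf() still goes through format parsing (and would misbehave if it ever contained a stray '%'), while trace_seq_puts() copies it verbatim and trace_seq_putc() emits a single byte. The three helpers side by side, as a sketch:

	/* Sketch: pick the cheapest trace_seq helper for the job. */
	trace_seq_puts(s, "# compressed entry header\n"); /* no format parsing */
	trace_seq_putc(s, '\n');                          /* one byte */
	trace_seq_printf(s, "\tpadding : type == %d\n",   /* only when formatting */
			 RINGBUF_TYPE_PADDING);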
@@ -1066,7 +1066,7 @@ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, | |||
1066 | } | 1066 | } |
1067 | 1067 | ||
1068 | /** | 1068 | /** |
1069 | * check_pages - integrity check of buffer pages | 1069 | * rb_check_pages - integrity check of buffer pages |
1070 | * @cpu_buffer: CPU buffer with pages to test | 1070 | * @cpu_buffer: CPU buffer with pages to test |
1071 | * | 1071 | * |
1072 | * As a safety measure we check to make sure the data pages have not | 1072 | * As a safety measure we check to make sure the data pages have not |
@@ -1258,7 +1258,7 @@ static int rb_cpu_notify(struct notifier_block *self, | |||
1258 | #endif | 1258 | #endif |
1259 | 1259 | ||
1260 | /** | 1260 | /** |
1261 | * ring_buffer_alloc - allocate a new ring_buffer | 1261 | * __ring_buffer_alloc - allocate a new ring_buffer |
1262 | * @size: the size in bytes per cpu that is needed. | 1262 | * @size: the size in bytes per cpu that is needed. |
1263 | * @flags: attributes to set for the ring buffer. | 1263 | * @flags: attributes to set for the ring buffer. |
1264 | * | 1264 | * |
@@ -1607,6 +1607,7 @@ static void update_pages_handler(struct work_struct *work) | |||
1607 | * ring_buffer_resize - resize the ring buffer | 1607 | * ring_buffer_resize - resize the ring buffer |
1608 | * @buffer: the buffer to resize. | 1608 | * @buffer: the buffer to resize. |
1609 | * @size: the new size. | 1609 | * @size: the new size. |
1610 | * @cpu_id: the cpu buffer to resize | ||
1610 | * | 1611 | * |
1611 | * Minimum size is 2 * BUF_PAGE_SIZE. | 1612 | * Minimum size is 2 * BUF_PAGE_SIZE. |
1612 | * | 1613 | * |
@@ -3956,11 +3957,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume); | |||
3956 | * expected. | 3957 | * expected. |
3957 | * | 3958 | * |
3958 | * After a sequence of ring_buffer_read_prepare calls, the user is | 3959 | * After a sequence of ring_buffer_read_prepare calls, the user is |
3959 | * expected to make at least one call to ring_buffer_prepare_sync. | 3960 | * expected to make at least one call to ring_buffer_read_prepare_sync. |
3960 | * Afterwards, ring_buffer_read_start is invoked to get things going | 3961 | * Afterwards, ring_buffer_read_start is invoked to get things going |
3961 | * for real. | 3962 | * for real. |
3962 | * | 3963 | * |
3963 | * This overall must be paired with ring_buffer_finish. | 3964 | * This overall must be paired with ring_buffer_read_finish. |
3964 | */ | 3965 | */ |
3965 | struct ring_buffer_iter * | 3966 | struct ring_buffer_iter * |
3966 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) | 3967 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu) |
@@ -4009,7 +4010,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync); | |||
4009 | * an intervening ring_buffer_read_prepare_sync must have been | 4010 | * an intervening ring_buffer_read_prepare_sync must have been |
4010 | * performed. | 4011 | * performed. |
4011 | * | 4012 | * |
4012 | * Must be paired with ring_buffer_finish. | 4013 | * Must be paired with ring_buffer_read_finish. |
4013 | */ | 4014 | */ |
4014 | void | 4015 | void |
4015 | ring_buffer_read_start(struct ring_buffer_iter *iter) | 4016 | ring_buffer_read_start(struct ring_buffer_iter *iter) |
@@ -4031,7 +4032,7 @@ ring_buffer_read_start(struct ring_buffer_iter *iter) | |||
4031 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); | 4032 | EXPORT_SYMBOL_GPL(ring_buffer_read_start); |
4032 | 4033 | ||
4033 | /** | 4034 | /** |
4034 | * ring_buffer_finish - finish reading the iterator of the buffer | 4035 | * ring_buffer_read_finish - finish reading the iterator of the buffer |
4035 | * @iter: The iterator retrieved by ring_buffer_start | 4036 | * @iter: The iterator retrieved by ring_buffer_start |
4036 | * | 4037 | * |
4037 | * This re-enables the recording to the buffer, and frees the | 4038 | * This re-enables the recording to the buffer, and frees the |
@@ -4346,6 +4347,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | |||
4346 | /** | 4347 | /** |
4347 | * ring_buffer_alloc_read_page - allocate a page to read from buffer | 4348 | * ring_buffer_alloc_read_page - allocate a page to read from buffer |
4348 | * @buffer: the buffer to allocate for. | 4349 | * @buffer: the buffer to allocate for. |
4350 | * @cpu: the cpu buffer to allocate. | ||
4349 | * | 4351 | * |
4350 | * This function is used in conjunction with ring_buffer_read_page. | 4352 | * This function is used in conjunction with ring_buffer_read_page. |
4351 | * When reading a full page from the ring buffer, these functions | 4353 | * When reading a full page from the ring buffer, these functions |
@@ -4403,7 +4405,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); | |||
4403 | * to swap with a page in the ring buffer. | 4405 | * to swap with a page in the ring buffer. |
4404 | * | 4406 | * |
4405 | * for example: | 4407 | * for example: |
4406 | * rpage = ring_buffer_alloc_read_page(buffer); | 4408 | * rpage = ring_buffer_alloc_read_page(buffer, cpu); |
4407 | * if (!rpage) | 4409 | * if (!rpage) |
4408 | * return error; | 4410 | * return error; |
4409 | * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); | 4411 | * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); |
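Editor's note: the remaining ring_buffer.c changes are kernel-doc repairs — comments now name the functions that actually exist (rb_check_pages, __ring_buffer_alloc, ring_buffer_read_prepare_sync, ring_buffer_read_finish) and document the @cpu_id/@cpu parameters already present in the signatures. Per the corrected comments, the read-page pairing looks roughly like this (error handling abbreviated; a sketch, not the kernel's code):

	/* Sketch of the documented read-page usage. */
	void *rpage = ring_buffer_alloc_read_page(buffer, cpu);
	if (!rpage)
		return -ENOMEM;
	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
	if (ret >= 0)
		/* rpage now holds a copied (or swapped-out) buffer page */;
	ring_buffer_free_read_page(buffer, rpage);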
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 0cd500bffd9b..882ec1dd1515 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -1224,18 +1224,17 @@ void tracing_reset_current(int cpu) | |||
1224 | tracing_reset(&global_trace.trace_buffer, cpu); | 1224 | tracing_reset(&global_trace.trace_buffer, cpu); |
1225 | } | 1225 | } |
1226 | 1226 | ||
1227 | /* Must have trace_types_lock held */ | ||
1227 | void tracing_reset_all_online_cpus(void) | 1228 | void tracing_reset_all_online_cpus(void) |
1228 | { | 1229 | { |
1229 | struct trace_array *tr; | 1230 | struct trace_array *tr; |
1230 | 1231 | ||
1231 | mutex_lock(&trace_types_lock); | ||
1232 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | 1232 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { |
1233 | tracing_reset_online_cpus(&tr->trace_buffer); | 1233 | tracing_reset_online_cpus(&tr->trace_buffer); |
1234 | #ifdef CONFIG_TRACER_MAX_TRACE | 1234 | #ifdef CONFIG_TRACER_MAX_TRACE |
1235 | tracing_reset_online_cpus(&tr->max_buffer); | 1235 | tracing_reset_online_cpus(&tr->max_buffer); |
1236 | #endif | 1236 | #endif |
1237 | } | 1237 | } |
1238 | mutex_unlock(&trace_types_lock); | ||
1239 | } | 1238 | } |
1240 | 1239 | ||
1241 | #define SAVED_CMDLINES 128 | 1240 | #define SAVED_CMDLINES 128 |
@@ -2843,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v) | |||
2843 | return 0; | 2842 | return 0; |
2844 | } | 2843 | } |
2845 | 2844 | ||
2845 | /* | ||
2846 | * Should be used after trace_array_get(), trace_types_lock | ||
2847 | * ensures that i_cdev was already initialized. | ||
2848 | */ | ||
2849 | static inline int tracing_get_cpu(struct inode *inode) | ||
2850 | { | ||
2851 | if (inode->i_cdev) /* See trace_create_cpu_file() */ | ||
2852 | return (long)inode->i_cdev - 1; | ||
2853 | return RING_BUFFER_ALL_CPUS; | ||
2854 | } | ||
2855 | |||
2846 | static const struct seq_operations tracer_seq_ops = { | 2856 | static const struct seq_operations tracer_seq_ops = { |
2847 | .start = s_start, | 2857 | .start = s_start, |
2848 | .next = s_next, | 2858 | .next = s_next, |
@@ -2851,9 +2861,9 @@ static const struct seq_operations tracer_seq_ops = { | |||
2851 | }; | 2861 | }; |
2852 | 2862 | ||
2853 | static struct trace_iterator * | 2863 | static struct trace_iterator * |
2854 | __tracing_open(struct trace_array *tr, struct trace_cpu *tc, | 2864 | __tracing_open(struct inode *inode, struct file *file, bool snapshot) |
2855 | struct inode *inode, struct file *file, bool snapshot) | ||
2856 | { | 2865 | { |
2866 | struct trace_array *tr = inode->i_private; | ||
2857 | struct trace_iterator *iter; | 2867 | struct trace_iterator *iter; |
2858 | int cpu; | 2868 | int cpu; |
2859 | 2869 | ||
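Editor's note: with struct trace_cpu on its way out, per-CPU debugfs files need another way to say which CPU they represent. The new tracing_get_cpu() reads it from the inode's otherwise-unused i_cdev field, where trace_create_cpu_file() (added further down) stores cpu + 1 — the bias keeps CPU 0 distinguishable from a NULL "this is not a per-CPU file" pointer. Both halves of the trick, sketched:

	/* Sketch: stash the CPU in the inode at file-creation time... */
	dentry->d_inode->i_cdev = (void *)(cpu + 1); /* 0 must not look "unset" */

	/* ...and recover it at open/read time. */
	static int cpu_of(struct inode *inode)
	{
		if (inode->i_cdev)
			return (long)inode->i_cdev - 1;	/* a real CPU number */
		return RING_BUFFER_ALL_CPUS;		/* shared, all-CPUs file */
	}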
@@ -2894,8 +2904,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc, | |||
2894 | iter->trace_buffer = &tr->trace_buffer; | 2904 | iter->trace_buffer = &tr->trace_buffer; |
2895 | iter->snapshot = snapshot; | 2905 | iter->snapshot = snapshot; |
2896 | iter->pos = -1; | 2906 | iter->pos = -1; |
2907 | iter->cpu_file = tracing_get_cpu(inode); | ||
2897 | mutex_init(&iter->mutex); | 2908 | mutex_init(&iter->mutex); |
2898 | iter->cpu_file = tc->cpu; | ||
2899 | 2909 | ||
2900 | /* Notify the tracer early; before we stop tracing. */ | 2910 | /* Notify the tracer early; before we stop tracing. */ |
2901 | if (iter->trace && iter->trace->open) | 2911 | if (iter->trace && iter->trace->open) |
@@ -2971,45 +2981,22 @@ static int tracing_open_generic_tr(struct inode *inode, struct file *filp) | |||
2971 | filp->private_data = inode->i_private; | 2981 | filp->private_data = inode->i_private; |
2972 | 2982 | ||
2973 | return 0; | 2983 | return 0; |
2974 | |||
2975 | } | ||
2976 | |||
2977 | static int tracing_open_generic_tc(struct inode *inode, struct file *filp) | ||
2978 | { | ||
2979 | struct trace_cpu *tc = inode->i_private; | ||
2980 | struct trace_array *tr = tc->tr; | ||
2981 | |||
2982 | if (tracing_disabled) | ||
2983 | return -ENODEV; | ||
2984 | |||
2985 | if (trace_array_get(tr) < 0) | ||
2986 | return -ENODEV; | ||
2987 | |||
2988 | filp->private_data = inode->i_private; | ||
2989 | |||
2990 | return 0; | ||
2991 | |||
2992 | } | 2984 | } |
2993 | 2985 | ||
2994 | static int tracing_release(struct inode *inode, struct file *file) | 2986 | static int tracing_release(struct inode *inode, struct file *file) |
2995 | { | 2987 | { |
2988 | struct trace_array *tr = inode->i_private; | ||
2996 | struct seq_file *m = file->private_data; | 2989 | struct seq_file *m = file->private_data; |
2997 | struct trace_iterator *iter; | 2990 | struct trace_iterator *iter; |
2998 | struct trace_array *tr; | ||
2999 | int cpu; | 2991 | int cpu; |
3000 | 2992 | ||
3001 | /* Writes do not use seq_file, need to grab tr from inode */ | ||
3002 | if (!(file->f_mode & FMODE_READ)) { | 2993 | if (!(file->f_mode & FMODE_READ)) { |
3003 | struct trace_cpu *tc = inode->i_private; | 2994 | trace_array_put(tr); |
3004 | |||
3005 | trace_array_put(tc->tr); | ||
3006 | return 0; | 2995 | return 0; |
3007 | } | 2996 | } |
3008 | 2997 | ||
2998 | /* Writes do not use seq_file */ | ||
3009 | iter = m->private; | 2999 | iter = m->private; |
3010 | tr = iter->tr; | ||
3011 | trace_array_put(tr); | ||
3012 | |||
3013 | mutex_lock(&trace_types_lock); | 3000 | mutex_lock(&trace_types_lock); |
3014 | 3001 | ||
3015 | for_each_tracing_cpu(cpu) { | 3002 | for_each_tracing_cpu(cpu) { |
@@ -3023,6 +3010,9 @@ static int tracing_release(struct inode *inode, struct file *file) | |||
3023 | if (!iter->snapshot) | 3010 | if (!iter->snapshot) |
3024 | /* reenable tracing if it was previously enabled */ | 3011 | /* reenable tracing if it was previously enabled */ |
3025 | tracing_start_tr(tr); | 3012 | tracing_start_tr(tr); |
3013 | |||
3014 | __trace_array_put(tr); | ||
3015 | |||
3026 | mutex_unlock(&trace_types_lock); | 3016 | mutex_unlock(&trace_types_lock); |
3027 | 3017 | ||
3028 | mutex_destroy(&iter->mutex); | 3018 | mutex_destroy(&iter->mutex); |
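Editor's note: tracing_release() can no longer dig the trace_array out of the iterator for every caller, because files opened write-only never allocate a seq_file or iterator; the trace_array now always comes from inode->i_private. The resulting shape of the release path, sketched:

	/* Sketch: write-only opens have no seq_file to tear down. */
	static int release_sketch(struct inode *inode, struct file *file)
	{
		struct trace_array *tr = inode->i_private;

		if (!(file->f_mode & FMODE_READ)) {
			trace_array_put(tr);	/* just drop the open's reference */
			return 0;
		}

		/* readers: destroy the iterator, and __trace_array_put(tr)
		 * while trace_types_lock is still held */
		return 0;
	}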
@@ -3042,15 +3032,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file) | |||
3042 | return 0; | 3032 | return 0; |
3043 | } | 3033 | } |
3044 | 3034 | ||
3045 | static int tracing_release_generic_tc(struct inode *inode, struct file *file) | ||
3046 | { | ||
3047 | struct trace_cpu *tc = inode->i_private; | ||
3048 | struct trace_array *tr = tc->tr; | ||
3049 | |||
3050 | trace_array_put(tr); | ||
3051 | return 0; | ||
3052 | } | ||
3053 | |||
3054 | static int tracing_single_release_tr(struct inode *inode, struct file *file) | 3035 | static int tracing_single_release_tr(struct inode *inode, struct file *file) |
3055 | { | 3036 | { |
3056 | struct trace_array *tr = inode->i_private; | 3037 | struct trace_array *tr = inode->i_private; |
@@ -3062,8 +3043,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file) | |||
3062 | 3043 | ||
3063 | static int tracing_open(struct inode *inode, struct file *file) | 3044 | static int tracing_open(struct inode *inode, struct file *file) |
3064 | { | 3045 | { |
3065 | struct trace_cpu *tc = inode->i_private; | 3046 | struct trace_array *tr = inode->i_private; |
3066 | struct trace_array *tr = tc->tr; | ||
3067 | struct trace_iterator *iter; | 3047 | struct trace_iterator *iter; |
3068 | int ret = 0; | 3048 | int ret = 0; |
3069 | 3049 | ||
@@ -3071,16 +3051,17 @@ static int tracing_open(struct inode *inode, struct file *file) | |||
3071 | return -ENODEV; | 3051 | return -ENODEV; |
3072 | 3052 | ||
3073 | /* If this file was open for write, then erase contents */ | 3053 | /* If this file was open for write, then erase contents */ |
3074 | if ((file->f_mode & FMODE_WRITE) && | 3054 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { |
3075 | (file->f_flags & O_TRUNC)) { | 3055 | int cpu = tracing_get_cpu(inode); |
3076 | if (tc->cpu == RING_BUFFER_ALL_CPUS) | 3056 | |
3057 | if (cpu == RING_BUFFER_ALL_CPUS) | ||
3077 | tracing_reset_online_cpus(&tr->trace_buffer); | 3058 | tracing_reset_online_cpus(&tr->trace_buffer); |
3078 | else | 3059 | else |
3079 | tracing_reset(&tr->trace_buffer, tc->cpu); | 3060 | tracing_reset(&tr->trace_buffer, cpu); |
3080 | } | 3061 | } |
3081 | 3062 | ||
3082 | if (file->f_mode & FMODE_READ) { | 3063 | if (file->f_mode & FMODE_READ) { |
3083 | iter = __tracing_open(tr, tc, inode, file, false); | 3064 | iter = __tracing_open(inode, file, false); |
3084 | if (IS_ERR(iter)) | 3065 | if (IS_ERR(iter)) |
3085 | ret = PTR_ERR(iter); | 3066 | ret = PTR_ERR(iter); |
3086 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) | 3067 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) |
@@ -3447,6 +3428,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
3447 | static int tracing_trace_options_open(struct inode *inode, struct file *file) | 3428 | static int tracing_trace_options_open(struct inode *inode, struct file *file) |
3448 | { | 3429 | { |
3449 | struct trace_array *tr = inode->i_private; | 3430 | struct trace_array *tr = inode->i_private; |
3431 | int ret; | ||
3450 | 3432 | ||
3451 | if (tracing_disabled) | 3433 | if (tracing_disabled) |
3452 | return -ENODEV; | 3434 | return -ENODEV; |
@@ -3454,7 +3436,11 @@ static int tracing_trace_options_open(struct inode *inode, struct file *file) | |||
3454 | if (trace_array_get(tr) < 0) | 3436 | if (trace_array_get(tr) < 0) |
3455 | return -ENODEV; | 3437 | return -ENODEV; |
3456 | 3438 | ||
3457 | return single_open(file, tracing_trace_options_show, inode->i_private); | 3439 | ret = single_open(file, tracing_trace_options_show, inode->i_private); |
3440 | if (ret < 0) | ||
3441 | trace_array_put(tr); | ||
3442 | |||
3443 | return ret; | ||
3458 | } | 3444 | } |
3459 | 3445 | ||
3460 | static const struct file_operations tracing_iter_fops = { | 3446 | static const struct file_operations tracing_iter_fops = { |
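Editor's note: tracing_trace_options_open() previously leaked the reference taken by trace_array_get() whenever single_open() failed, since ->release() never runs for a failed open. The fix is the standard pattern, sketched:

	/* Sketch: balance the get when open fails, because release won't. */
	if (trace_array_get(tr) < 0)
		return -ENODEV;		/* instance is going away */

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);	/* no release() will come to drop it */
	return ret;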
@@ -3537,14 +3523,14 @@ static const char readme_msg[] = | |||
3537 | "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" | 3523 | "\n snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n" |
3538 | "\t\t\t Read the contents for more information\n" | 3524 | "\t\t\t Read the contents for more information\n" |
3539 | #endif | 3525 | #endif |
3540 | #ifdef CONFIG_STACKTRACE | 3526 | #ifdef CONFIG_STACK_TRACER |
3541 | " stack_trace\t\t- Shows the max stack trace when active\n" | 3527 | " stack_trace\t\t- Shows the max stack trace when active\n" |
3542 | " stack_max_size\t- Shows current max stack size that was traced\n" | 3528 | " stack_max_size\t- Shows current max stack size that was traced\n" |
3543 | "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" | 3529 | "\t\t\t Write into this file to reset the max size (trigger a new trace)\n" |
3544 | #ifdef CONFIG_DYNAMIC_FTRACE | 3530 | #ifdef CONFIG_DYNAMIC_FTRACE |
3545 | " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" | 3531 | " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n" |
3546 | #endif | 3532 | #endif |
3547 | #endif /* CONFIG_STACKTRACE */ | 3533 | #endif /* CONFIG_STACK_TRACER */ |
3548 | ; | 3534 | ; |
3549 | 3535 | ||
3550 | static ssize_t | 3536 | static ssize_t |
@@ -3941,8 +3927,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |||
3941 | 3927 | ||
3942 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 3928 | static int tracing_open_pipe(struct inode *inode, struct file *filp) |
3943 | { | 3929 | { |
3944 | struct trace_cpu *tc = inode->i_private; | 3930 | struct trace_array *tr = inode->i_private; |
3945 | struct trace_array *tr = tc->tr; | ||
3946 | struct trace_iterator *iter; | 3931 | struct trace_iterator *iter; |
3947 | int ret = 0; | 3932 | int ret = 0; |
3948 | 3933 | ||
@@ -3958,6 +3943,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
3958 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 3943 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
3959 | if (!iter) { | 3944 | if (!iter) { |
3960 | ret = -ENOMEM; | 3945 | ret = -ENOMEM; |
3946 | __trace_array_put(tr); | ||
3961 | goto out; | 3947 | goto out; |
3962 | } | 3948 | } |
3963 | 3949 | ||
@@ -3987,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
3987 | if (trace_clocks[tr->clock_id].in_ns) | 3973 | if (trace_clocks[tr->clock_id].in_ns) |
3988 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; | 3974 | iter->iter_flags |= TRACE_FILE_TIME_IN_NS; |
3989 | 3975 | ||
3990 | iter->cpu_file = tc->cpu; | 3976 | iter->tr = tr; |
3991 | iter->tr = tc->tr; | 3977 | iter->trace_buffer = &tr->trace_buffer; |
3992 | iter->trace_buffer = &tc->tr->trace_buffer; | 3978 | iter->cpu_file = tracing_get_cpu(inode); |
3993 | mutex_init(&iter->mutex); | 3979 | mutex_init(&iter->mutex); |
3994 | filp->private_data = iter; | 3980 | filp->private_data = iter; |
3995 | 3981 | ||
@@ -4012,8 +3998,7 @@ fail: | |||
4012 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 3998 | static int tracing_release_pipe(struct inode *inode, struct file *file) |
4013 | { | 3999 | { |
4014 | struct trace_iterator *iter = file->private_data; | 4000 | struct trace_iterator *iter = file->private_data; |
4015 | struct trace_cpu *tc = inode->i_private; | 4001 | struct trace_array *tr = inode->i_private; |
4016 | struct trace_array *tr = tc->tr; | ||
4017 | 4002 | ||
4018 | mutex_lock(&trace_types_lock); | 4003 | mutex_lock(&trace_types_lock); |
4019 | 4004 | ||
@@ -4366,15 +4351,16 @@ static ssize_t | |||
4366 | tracing_entries_read(struct file *filp, char __user *ubuf, | 4351 | tracing_entries_read(struct file *filp, char __user *ubuf, |
4367 | size_t cnt, loff_t *ppos) | 4352 | size_t cnt, loff_t *ppos) |
4368 | { | 4353 | { |
4369 | struct trace_cpu *tc = filp->private_data; | 4354 | struct inode *inode = file_inode(filp); |
4370 | struct trace_array *tr = tc->tr; | 4355 | struct trace_array *tr = inode->i_private; |
4356 | int cpu = tracing_get_cpu(inode); | ||
4371 | char buf[64]; | 4357 | char buf[64]; |
4372 | int r = 0; | 4358 | int r = 0; |
4373 | ssize_t ret; | 4359 | ssize_t ret; |
4374 | 4360 | ||
4375 | mutex_lock(&trace_types_lock); | 4361 | mutex_lock(&trace_types_lock); |
4376 | 4362 | ||
4377 | if (tc->cpu == RING_BUFFER_ALL_CPUS) { | 4363 | if (cpu == RING_BUFFER_ALL_CPUS) { |
4378 | int cpu, buf_size_same; | 4364 | int cpu, buf_size_same; |
4379 | unsigned long size; | 4365 | unsigned long size; |
4380 | 4366 | ||
@@ -4401,7 +4387,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf, | |||
4401 | } else | 4387 | } else |
4402 | r = sprintf(buf, "X\n"); | 4388 | r = sprintf(buf, "X\n"); |
4403 | } else | 4389 | } else |
4404 | r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10); | 4390 | r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10); |
4405 | 4391 | ||
4406 | mutex_unlock(&trace_types_lock); | 4392 | mutex_unlock(&trace_types_lock); |
4407 | 4393 | ||
@@ -4413,7 +4399,8 @@ static ssize_t | |||
4413 | tracing_entries_write(struct file *filp, const char __user *ubuf, | 4399 | tracing_entries_write(struct file *filp, const char __user *ubuf, |
4414 | size_t cnt, loff_t *ppos) | 4400 | size_t cnt, loff_t *ppos) |
4415 | { | 4401 | { |
4416 | struct trace_cpu *tc = filp->private_data; | 4402 | struct inode *inode = file_inode(filp); |
4403 | struct trace_array *tr = inode->i_private; | ||
4417 | unsigned long val; | 4404 | unsigned long val; |
4418 | int ret; | 4405 | int ret; |
4419 | 4406 | ||
@@ -4427,8 +4414,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
4427 | 4414 | ||
4428 | /* value is in KB */ | 4415 | /* value is in KB */ |
4429 | val <<= 10; | 4416 | val <<= 10; |
4430 | 4417 | ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode)); | |
4431 | ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu); | ||
4432 | if (ret < 0) | 4418 | if (ret < 0) |
4433 | return ret; | 4419 | return ret; |
4434 | 4420 | ||
@@ -4689,8 +4675,7 @@ struct ftrace_buffer_info { | |||
4689 | #ifdef CONFIG_TRACER_SNAPSHOT | 4675 | #ifdef CONFIG_TRACER_SNAPSHOT |
4690 | static int tracing_snapshot_open(struct inode *inode, struct file *file) | 4676 | static int tracing_snapshot_open(struct inode *inode, struct file *file) |
4691 | { | 4677 | { |
4692 | struct trace_cpu *tc = inode->i_private; | 4678 | struct trace_array *tr = inode->i_private; |
4693 | struct trace_array *tr = tc->tr; | ||
4694 | struct trace_iterator *iter; | 4679 | struct trace_iterator *iter; |
4695 | struct seq_file *m; | 4680 | struct seq_file *m; |
4696 | int ret = 0; | 4681 | int ret = 0; |
@@ -4699,26 +4684,29 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file) | |||
4699 | return -ENODEV; | 4684 | return -ENODEV; |
4700 | 4685 | ||
4701 | if (file->f_mode & FMODE_READ) { | 4686 | if (file->f_mode & FMODE_READ) { |
4702 | iter = __tracing_open(tr, tc, inode, file, true); | 4687 | iter = __tracing_open(inode, file, true); |
4703 | if (IS_ERR(iter)) | 4688 | if (IS_ERR(iter)) |
4704 | ret = PTR_ERR(iter); | 4689 | ret = PTR_ERR(iter); |
4705 | } else { | 4690 | } else { |
4706 | /* Writes still need the seq_file to hold the private data */ | 4691 | /* Writes still need the seq_file to hold the private data */ |
4692 | ret = -ENOMEM; | ||
4707 | m = kzalloc(sizeof(*m), GFP_KERNEL); | 4693 | m = kzalloc(sizeof(*m), GFP_KERNEL); |
4708 | if (!m) | 4694 | if (!m) |
4709 | return -ENOMEM; | 4695 | goto out; |
4710 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 4696 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
4711 | if (!iter) { | 4697 | if (!iter) { |
4712 | kfree(m); | 4698 | kfree(m); |
4713 | return -ENOMEM; | 4699 | goto out; |
4714 | } | 4700 | } |
4701 | ret = 0; | ||
4702 | |||
4715 | iter->tr = tr; | 4703 | iter->tr = tr; |
4716 | iter->trace_buffer = &tc->tr->max_buffer; | 4704 | iter->trace_buffer = &tr->max_buffer; |
4717 | iter->cpu_file = tc->cpu; | 4705 | iter->cpu_file = tracing_get_cpu(inode); |
4718 | m->private = iter; | 4706 | m->private = iter; |
4719 | file->private_data = m; | 4707 | file->private_data = m; |
4720 | } | 4708 | } |
4721 | 4709 | out: | |
4722 | if (ret < 0) | 4710 | if (ret < 0) |
4723 | trace_array_put(tr); | 4711 | trace_array_put(tr); |
4724 | 4712 | ||
@@ -4873,11 +4861,11 @@ static const struct file_operations tracing_pipe_fops = { | |||
4873 | }; | 4861 | }; |
4874 | 4862 | ||
4875 | static const struct file_operations tracing_entries_fops = { | 4863 | static const struct file_operations tracing_entries_fops = { |
4876 | .open = tracing_open_generic_tc, | 4864 | .open = tracing_open_generic_tr, |
4877 | .read = tracing_entries_read, | 4865 | .read = tracing_entries_read, |
4878 | .write = tracing_entries_write, | 4866 | .write = tracing_entries_write, |
4879 | .llseek = generic_file_llseek, | 4867 | .llseek = generic_file_llseek, |
4880 | .release = tracing_release_generic_tc, | 4868 | .release = tracing_release_generic_tr, |
4881 | }; | 4869 | }; |
4882 | 4870 | ||
4883 | static const struct file_operations tracing_total_entries_fops = { | 4871 | static const struct file_operations tracing_total_entries_fops = { |
@@ -4929,8 +4917,7 @@ static const struct file_operations snapshot_raw_fops = { | |||
4929 | 4917 | ||
4930 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | 4918 | static int tracing_buffers_open(struct inode *inode, struct file *filp) |
4931 | { | 4919 | { |
4932 | struct trace_cpu *tc = inode->i_private; | 4920 | struct trace_array *tr = inode->i_private; |
4933 | struct trace_array *tr = tc->tr; | ||
4934 | struct ftrace_buffer_info *info; | 4921 | struct ftrace_buffer_info *info; |
4935 | int ret; | 4922 | int ret; |
4936 | 4923 | ||
@@ -4948,10 +4935,8 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp) | |||
4948 | 4935 | ||
4949 | mutex_lock(&trace_types_lock); | 4936 | mutex_lock(&trace_types_lock); |
4950 | 4937 | ||
4951 | tr->ref++; | ||
4952 | |||
4953 | info->iter.tr = tr; | 4938 | info->iter.tr = tr; |
4954 | info->iter.cpu_file = tc->cpu; | 4939 | info->iter.cpu_file = tracing_get_cpu(inode); |
4955 | info->iter.trace = tr->current_trace; | 4940 | info->iter.trace = tr->current_trace; |
4956 | info->iter.trace_buffer = &tr->trace_buffer; | 4941 | info->iter.trace_buffer = &tr->trace_buffer; |
4957 | info->spare = NULL; | 4942 | info->spare = NULL; |
@@ -5268,14 +5253,14 @@ static ssize_t | |||
5268 | tracing_stats_read(struct file *filp, char __user *ubuf, | 5253 | tracing_stats_read(struct file *filp, char __user *ubuf, |
5269 | size_t count, loff_t *ppos) | 5254 | size_t count, loff_t *ppos) |
5270 | { | 5255 | { |
5271 | struct trace_cpu *tc = filp->private_data; | 5256 | struct inode *inode = file_inode(filp); |
5272 | struct trace_array *tr = tc->tr; | 5257 | struct trace_array *tr = inode->i_private; |
5273 | struct trace_buffer *trace_buf = &tr->trace_buffer; | 5258 | struct trace_buffer *trace_buf = &tr->trace_buffer; |
5259 | int cpu = tracing_get_cpu(inode); | ||
5274 | struct trace_seq *s; | 5260 | struct trace_seq *s; |
5275 | unsigned long cnt; | 5261 | unsigned long cnt; |
5276 | unsigned long long t; | 5262 | unsigned long long t; |
5277 | unsigned long usec_rem; | 5263 | unsigned long usec_rem; |
5278 | int cpu = tc->cpu; | ||
5279 | 5264 | ||
5280 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 5265 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
5281 | if (!s) | 5266 | if (!s) |
@@ -5328,9 +5313,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
5328 | } | 5313 | } |
5329 | 5314 | ||
5330 | static const struct file_operations tracing_stats_fops = { | 5315 | static const struct file_operations tracing_stats_fops = { |
5331 | .open = tracing_open_generic, | 5316 | .open = tracing_open_generic_tr, |
5332 | .read = tracing_stats_read, | 5317 | .read = tracing_stats_read, |
5333 | .llseek = generic_file_llseek, | 5318 | .llseek = generic_file_llseek, |
5319 | .release = tracing_release_generic_tr, | ||
5334 | }; | 5320 | }; |
5335 | 5321 | ||
5336 | #ifdef CONFIG_DYNAMIC_FTRACE | 5322 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -5519,10 +5505,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu) | |||
5519 | return tr->percpu_dir; | 5505 | return tr->percpu_dir; |
5520 | } | 5506 | } |
5521 | 5507 | ||
5508 | static struct dentry * | ||
5509 | trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent, | ||
5510 | void *data, long cpu, const struct file_operations *fops) | ||
5511 | { | ||
5512 | struct dentry *ret = trace_create_file(name, mode, parent, data, fops); | ||
5513 | |||
5514 | if (ret) /* See tracing_get_cpu() */ | ||
5515 | ret->d_inode->i_cdev = (void *)(cpu + 1); | ||
5516 | return ret; | ||
5517 | } | ||
5518 | |||
5522 | static void | 5519 | static void |
5523 | tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) | 5520 | tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) |
5524 | { | 5521 | { |
5525 | struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu); | ||
5526 | struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); | 5522 | struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu); |
5527 | struct dentry *d_cpu; | 5523 | struct dentry *d_cpu; |
5528 | char cpu_dir[30]; /* 30 characters should be more than enough */ | 5524 | char cpu_dir[30]; /* 30 characters should be more than enough */ |
@@ -5538,28 +5534,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu) | |||
5538 | } | 5534 | } |
5539 | 5535 | ||
5540 | /* per cpu trace_pipe */ | 5536 | /* per cpu trace_pipe */ |
5541 | trace_create_file("trace_pipe", 0444, d_cpu, | 5537 | trace_create_cpu_file("trace_pipe", 0444, d_cpu, |
5542 | (void *)&data->trace_cpu, &tracing_pipe_fops); | 5538 | tr, cpu, &tracing_pipe_fops); |
5543 | 5539 | ||
5544 | /* per cpu trace */ | 5540 | /* per cpu trace */ |
5545 | trace_create_file("trace", 0644, d_cpu, | 5541 | trace_create_cpu_file("trace", 0644, d_cpu, |
5546 | (void *)&data->trace_cpu, &tracing_fops); | 5542 | tr, cpu, &tracing_fops); |
5547 | 5543 | ||
5548 | trace_create_file("trace_pipe_raw", 0444, d_cpu, | 5544 | trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu, |
5549 | (void *)&data->trace_cpu, &tracing_buffers_fops); | 5545 | tr, cpu, &tracing_buffers_fops); |
5550 | 5546 | ||
5551 | trace_create_file("stats", 0444, d_cpu, | 5547 | trace_create_cpu_file("stats", 0444, d_cpu, |
5552 | (void *)&data->trace_cpu, &tracing_stats_fops); | 5548 | tr, cpu, &tracing_stats_fops); |
5553 | 5549 | ||
5554 | trace_create_file("buffer_size_kb", 0444, d_cpu, | 5550 | trace_create_cpu_file("buffer_size_kb", 0444, d_cpu, |
5555 | (void *)&data->trace_cpu, &tracing_entries_fops); | 5551 | tr, cpu, &tracing_entries_fops); |
5556 | 5552 | ||
5557 | #ifdef CONFIG_TRACER_SNAPSHOT | 5553 | #ifdef CONFIG_TRACER_SNAPSHOT |
5558 | trace_create_file("snapshot", 0644, d_cpu, | 5554 | trace_create_cpu_file("snapshot", 0644, d_cpu, |
5559 | (void *)&data->trace_cpu, &snapshot_fops); | 5555 | tr, cpu, &snapshot_fops); |
5560 | 5556 | ||
5561 | trace_create_file("snapshot_raw", 0444, d_cpu, | 5557 | trace_create_cpu_file("snapshot_raw", 0444, d_cpu, |
5562 | (void *)&data->trace_cpu, &snapshot_raw_fops); | 5558 | tr, cpu, &snapshot_raw_fops); |
5563 | #endif | 5559 | #endif |
5564 | } | 5560 | } |
5565 | 5561 | ||
@@ -5868,17 +5864,6 @@ struct dentry *trace_instance_dir; | |||
5868 | static void | 5864 | static void |
5869 | init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); | 5865 | init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer); |
5870 | 5866 | ||
5871 | static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf) | ||
5872 | { | ||
5873 | int cpu; | ||
5874 | |||
5875 | for_each_tracing_cpu(cpu) { | ||
5876 | memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu)); | ||
5877 | per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu; | ||
5878 | per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr; | ||
5879 | } | ||
5880 | } | ||
5881 | |||
5882 | static int | 5867 | static int |
5883 | allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) | 5868 | allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size) |
5884 | { | 5869 | { |
@@ -5896,8 +5881,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size | |||
5896 | return -ENOMEM; | 5881 | return -ENOMEM; |
5897 | } | 5882 | } |
5898 | 5883 | ||
5899 | init_trace_buffers(tr, buf); | ||
5900 | |||
5901 | /* Allocate the first page for all buffers */ | 5884 | /* Allocate the first page for all buffers */ |
5902 | set_buffer_entries(&tr->trace_buffer, | 5885 | set_buffer_entries(&tr->trace_buffer, |
5903 | ring_buffer_size(tr->trace_buffer.buffer, 0)); | 5886 | ring_buffer_size(tr->trace_buffer.buffer, 0)); |
@@ -5964,17 +5947,15 @@ static int new_instance_create(const char *name) | |||
5964 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) | 5947 | if (allocate_trace_buffers(tr, trace_buf_size) < 0) |
5965 | goto out_free_tr; | 5948 | goto out_free_tr; |
5966 | 5949 | ||
5967 | /* Holder for file callbacks */ | ||
5968 | tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS; | ||
5969 | tr->trace_cpu.tr = tr; | ||
5970 | |||
5971 | tr->dir = debugfs_create_dir(name, trace_instance_dir); | 5950 | tr->dir = debugfs_create_dir(name, trace_instance_dir); |
5972 | if (!tr->dir) | 5951 | if (!tr->dir) |
5973 | goto out_free_tr; | 5952 | goto out_free_tr; |
5974 | 5953 | ||
5975 | ret = event_trace_add_tracer(tr->dir, tr); | 5954 | ret = event_trace_add_tracer(tr->dir, tr); |
5976 | if (ret) | 5955 | if (ret) { |
5956 | debugfs_remove_recursive(tr->dir); | ||
5977 | goto out_free_tr; | 5957 | goto out_free_tr; |
5958 | } | ||
5978 | 5959 | ||
5979 | init_tracer_debugfs(tr, tr->dir); | 5960 | init_tracer_debugfs(tr, tr->dir); |
5980 | 5961 | ||
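Editor's note: new_instance_create() also gains a missing error-path cleanup — if wiring up the event directory fails, the freshly created debugfs directory must be removed before the trace_array is freed, or it would linger with stale private data. The unwind in isolation:

	/* Sketch: undo the debugfs dir if event setup fails. */
	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);	/* don't leak the dir */
		goto out_free_tr;
	}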
@@ -6120,13 +6101,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
6120 | tr, &tracing_iter_fops); | 6101 | tr, &tracing_iter_fops); |
6121 | 6102 | ||
6122 | trace_create_file("trace", 0644, d_tracer, | 6103 | trace_create_file("trace", 0644, d_tracer, |
6123 | (void *)&tr->trace_cpu, &tracing_fops); | 6104 | tr, &tracing_fops); |
6124 | 6105 | ||
6125 | trace_create_file("trace_pipe", 0444, d_tracer, | 6106 | trace_create_file("trace_pipe", 0444, d_tracer, |
6126 | (void *)&tr->trace_cpu, &tracing_pipe_fops); | 6107 | tr, &tracing_pipe_fops); |
6127 | 6108 | ||
6128 | trace_create_file("buffer_size_kb", 0644, d_tracer, | 6109 | trace_create_file("buffer_size_kb", 0644, d_tracer, |
6129 | (void *)&tr->trace_cpu, &tracing_entries_fops); | 6110 | tr, &tracing_entries_fops); |
6130 | 6111 | ||
6131 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, | 6112 | trace_create_file("buffer_total_size_kb", 0444, d_tracer, |
6132 | tr, &tracing_total_entries_fops); | 6113 | tr, &tracing_total_entries_fops); |
@@ -6141,11 +6122,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
6141 | &trace_clock_fops); | 6122 | &trace_clock_fops); |
6142 | 6123 | ||
6143 | trace_create_file("tracing_on", 0644, d_tracer, | 6124 | trace_create_file("tracing_on", 0644, d_tracer, |
6144 | tr, &rb_simple_fops); | 6125 | tr, &rb_simple_fops); |
6145 | 6126 | ||
6146 | #ifdef CONFIG_TRACER_SNAPSHOT | 6127 | #ifdef CONFIG_TRACER_SNAPSHOT |
6147 | trace_create_file("snapshot", 0644, d_tracer, | 6128 | trace_create_file("snapshot", 0644, d_tracer, |
6148 | (void *)&tr->trace_cpu, &snapshot_fops); | 6129 | tr, &snapshot_fops); |
6149 | #endif | 6130 | #endif |
6150 | 6131 | ||
6151 | for_each_tracing_cpu(cpu) | 6132 | for_each_tracing_cpu(cpu) |
@@ -6439,10 +6420,6 @@ __init static int tracer_alloc_buffers(void) | |||
6439 | 6420 | ||
6440 | global_trace.flags = TRACE_ARRAY_FL_GLOBAL; | 6421 | global_trace.flags = TRACE_ARRAY_FL_GLOBAL; |
6441 | 6422 | ||
6442 | /* Holder for file callbacks */ | ||
6443 | global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS; | ||
6444 | global_trace.trace_cpu.tr = &global_trace; | ||
6445 | |||
6446 | INIT_LIST_HEAD(&global_trace.systems); | 6423 | INIT_LIST_HEAD(&global_trace.systems); |
6447 | INIT_LIST_HEAD(&global_trace.events); | 6424 | INIT_LIST_HEAD(&global_trace.events); |
6448 | list_add(&global_trace.list, &ftrace_trace_arrays); | 6425 | list_add(&global_trace.list, &ftrace_trace_arrays); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4a4f6e1828b6..afaae41b0a02 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -130,19 +130,12 @@ enum trace_flag_type { | |||
130 | 130 | ||
131 | struct trace_array; | 131 | struct trace_array; |
132 | 132 | ||
133 | struct trace_cpu { | ||
134 | struct trace_array *tr; | ||
135 | struct dentry *dir; | ||
136 | int cpu; | ||
137 | }; | ||
138 | |||
139 | /* | 133 | /* |
140 | * The CPU trace array - it consists of thousands of trace entries | 134 | * The CPU trace array - it consists of thousands of trace entries |
141 | * plus some other descriptor data: (for example which task started | 135 | * plus some other descriptor data: (for example which task started |
142 | * the trace, etc.) | 136 | * the trace, etc.) |
143 | */ | 137 | */ |
144 | struct trace_array_cpu { | 138 | struct trace_array_cpu { |
145 | struct trace_cpu trace_cpu; | ||
146 | atomic_t disabled; | 139 | atomic_t disabled; |
147 | void *buffer_page; /* ring buffer spare */ | 140 | void *buffer_page; /* ring buffer spare */ |
148 | 141 | ||
@@ -196,7 +189,6 @@ struct trace_array { | |||
196 | bool allocated_snapshot; | 189 | bool allocated_snapshot; |
197 | #endif | 190 | #endif |
198 | int buffer_disabled; | 191 | int buffer_disabled; |
199 | struct trace_cpu trace_cpu; /* place holder */ | ||
200 | #ifdef CONFIG_FTRACE_SYSCALLS | 192 | #ifdef CONFIG_FTRACE_SYSCALLS |
201 | int sys_refcount_enter; | 193 | int sys_refcount_enter; |
202 | int sys_refcount_exit; | 194 | int sys_refcount_exit; |
@@ -214,7 +206,6 @@ struct trace_array { | |||
214 | struct dentry *event_dir; | 206 | struct dentry *event_dir; |
215 | struct list_head systems; | 207 | struct list_head systems; |
216 | struct list_head events; | 208 | struct list_head events; |
217 | struct task_struct *waiter; | ||
218 | int ref; | 209 | int ref; |
219 | }; | 210 | }; |
220 | 211 | ||
@@ -680,6 +671,15 @@ extern int trace_selftest_startup_sched_switch(struct tracer *trace, | |||
680 | struct trace_array *tr); | 671 | struct trace_array *tr); |
681 | extern int trace_selftest_startup_branch(struct tracer *trace, | 672 | extern int trace_selftest_startup_branch(struct tracer *trace, |
682 | struct trace_array *tr); | 673 | struct trace_array *tr); |
674 | /* | ||
675 | * Tracer data references selftest functions that only occur | ||
676 | * on boot up. These can be __init functions. Thus, when selftests | ||
677 | * are enabled, then the tracers need to reference __init functions. | ||
678 | */ | ||
679 | #define __tracer_data __refdata | ||
680 | #else | ||
681 | /* Tracers are seldom changed. Optimize when selftests are disabled. */ | ||
682 | #define __tracer_data __read_mostly | ||
683 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 683 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
684 | 684 | ||
685 | extern void *head_page(struct trace_array_cpu *data); | 685 | extern void *head_page(struct trace_array_cpu *data); |
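Editor's note: the new __tracer_data annotation resolves the tension described in the added comment — with CONFIG_FTRACE_STARTUP_TEST, tracer definitions legitimately point at __init selftest functions, so they must be __refdata to avoid section-mismatch warnings; without selftests they can be __read_mostly. A hypothetical tracer definition using it (names illustrative, not from the patch):

	/* Sketch: how a tracer would adopt the annotation. */
	static struct tracer sketch_tracer __tracer_data = {
		.name		= "sketch",
		.init		= sketch_trace_init,	/* hypothetical */
	#ifdef CONFIG_FTRACE_STARTUP_TEST
		.selftest	= trace_selftest_startup_function, /* an __init fn */
	#endif
	};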
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 84b1e045faba..80c36bcf66e8 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -236,6 +236,10 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type, | |||
236 | 236 | ||
237 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); | 237 | BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long)); |
238 | 238 | ||
239 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
240 | "perf buffer not large enough")) | ||
241 | return NULL; | ||
242 | |||
239 | pc = preempt_count(); | 243 | pc = preempt_count(); |
240 | 244 | ||
241 | *rctxp = perf_swevent_get_recursion_context(); | 245 | *rctxp = perf_swevent_get_recursion_context(); |
@@ -266,6 +270,10 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, | |||
266 | struct pt_regs regs; | 270 | struct pt_regs regs; |
267 | int rctx; | 271 | int rctx; |
268 | 272 | ||
273 | head = this_cpu_ptr(event_function.perf_events); | ||
274 | if (hlist_empty(head)) | ||
275 | return; | ||
276 | |||
269 | #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ | 277 | #define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \ |
270 | sizeof(u64)) - sizeof(u32)) | 278 | sizeof(u64)) - sizeof(u32)) |
271 | 279 | ||
@@ -279,8 +287,6 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, | |||
279 | 287 | ||
280 | entry->ip = ip; | 288 | entry->ip = ip; |
281 | entry->parent_ip = parent_ip; | 289 | entry->parent_ip = parent_ip; |
282 | |||
283 | head = this_cpu_ptr(event_function.perf_events); | ||
284 | perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, | 290 | perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, |
285 | 1, ®s, head, NULL); | 291 | 1, ®s, head, NULL); |
286 | 292 | ||
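Editor's note: two cheap early exits land in the perf glue. perf_trace_buf_prepare() now refuses (with a one-time warning) any event larger than PERF_MAX_TRACE_SIZE rather than overrunning the per-CPU trace buffer, and perf_ftrace_function_call() checks for listeners before doing any buffer work instead of after. The reordered function-call path, sketched under the assumption that the surrounding locals match the original:

	/* Sketch: bail before buf_prepare when no perf event is attached. */
	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;		/* nothing to deliver on this CPU */

	entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
	if (!entry)		/* NULL now also covers oversized events */
		return;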
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 7d854290bf81..898f868833f2 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -826,59 +826,33 @@ enum { | |||
826 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) | 826 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
827 | { | 827 | { |
828 | struct ftrace_event_call *call = m->private; | 828 | struct ftrace_event_call *call = m->private; |
829 | struct ftrace_event_field *field; | ||
830 | struct list_head *common_head = &ftrace_common_fields; | 829 | struct list_head *common_head = &ftrace_common_fields; |
831 | struct list_head *head = trace_get_fields(call); | 830 | struct list_head *head = trace_get_fields(call); |
831 | struct list_head *node = v; | ||
832 | 832 | ||
833 | (*pos)++; | 833 | (*pos)++; |
834 | 834 | ||
835 | switch ((unsigned long)v) { | 835 | switch ((unsigned long)v) { |
836 | case FORMAT_HEADER: | 836 | case FORMAT_HEADER: |
837 | if (unlikely(list_empty(common_head))) | 837 | node = common_head; |
838 | return NULL; | 838 | break; |
839 | |||
840 | field = list_entry(common_head->prev, | ||
841 | struct ftrace_event_field, link); | ||
842 | return field; | ||
843 | 839 | ||
844 | case FORMAT_FIELD_SEPERATOR: | 840 | case FORMAT_FIELD_SEPERATOR: |
845 | if (unlikely(list_empty(head))) | 841 | node = head; |
846 | return NULL; | 842 | break; |
847 | |||
848 | field = list_entry(head->prev, struct ftrace_event_field, link); | ||
849 | return field; | ||
850 | 843 | ||
851 | case FORMAT_PRINTFMT: | 844 | case FORMAT_PRINTFMT: |
852 | /* all done */ | 845 | /* all done */ |
853 | return NULL; | 846 | return NULL; |
854 | } | 847 | } |
855 | 848 | ||
856 | field = v; | 849 | node = node->prev; |
857 | if (field->link.prev == common_head) | 850 | if (node == common_head) |
858 | return (void *)FORMAT_FIELD_SEPERATOR; | 851 | return (void *)FORMAT_FIELD_SEPERATOR; |
859 | else if (field->link.prev == head) | 852 | else if (node == head) |
860 | return (void *)FORMAT_PRINTFMT; | 853 | return (void *)FORMAT_PRINTFMT; |
861 | 854 | else | |
862 | field = list_entry(field->link.prev, struct ftrace_event_field, link); | 855 | return node; |
863 | |||
864 | return field; | ||
865 | } | ||
866 | |||
867 | static void *f_start(struct seq_file *m, loff_t *pos) | ||
868 | { | ||
869 | loff_t l = 0; | ||
870 | void *p; | ||
871 | |||
872 | /* Start by showing the header */ | ||
873 | if (!*pos) | ||
874 | return (void *)FORMAT_HEADER; | ||
875 | |||
876 | p = (void *)FORMAT_HEADER; | ||
877 | do { | ||
878 | p = f_next(m, p, &l); | ||
879 | } while (p && l < *pos); | ||
880 | |||
881 | return p; | ||
882 | } | 856 | } |
883 | 857 | ||
884 | static int f_show(struct seq_file *m, void *v) | 858 | static int f_show(struct seq_file *m, void *v) |
@@ -904,8 +878,7 @@ static int f_show(struct seq_file *m, void *v) | |||
904 | return 0; | 878 | return 0; |
905 | } | 879 | } |
906 | 880 | ||
907 | field = v; | 881 | field = list_entry(v, struct ftrace_event_field, link); |
908 | |||
909 | /* | 882 | /* |
910 | * Smartly shows the array type(except dynamic array). | 883 | * Smartly shows the array type(except dynamic array). |
911 | * Normal: | 884 | * Normal: |
@@ -932,6 +905,17 @@ static int f_show(struct seq_file *m, void *v) | |||
932 | return 0; | 905 | return 0; |
933 | } | 906 | } |
934 | 907 | ||
908 | static void *f_start(struct seq_file *m, loff_t *pos) | ||
909 | { | ||
910 | void *p = (void *)FORMAT_HEADER; | ||
911 | loff_t l = 0; | ||
912 | |||
913 | while (l < *pos && p) | ||
914 | p = f_next(m, p, &l); | ||
915 | |||
916 | return p; | ||
917 | } | ||
918 | |||
935 | static void f_stop(struct seq_file *m, void *p) | 919 | static void f_stop(struct seq_file *m, void *p) |
936 | { | 920 | { |
937 | } | 921 | } |
@@ -963,23 +947,14 @@ static ssize_t | |||
963 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | 947 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) |
964 | { | 948 | { |
965 | struct ftrace_event_call *call = filp->private_data; | 949 | struct ftrace_event_call *call = filp->private_data; |
966 | struct trace_seq *s; | 950 | char buf[32]; |
967 | int r; | 951 | int len; |
968 | 952 | ||
969 | if (*ppos) | 953 | if (*ppos) |
970 | return 0; | 954 | return 0; |
971 | 955 | ||
972 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 956 | len = sprintf(buf, "%d\n", call->event.type); |
973 | if (!s) | 957 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); |
974 | return -ENOMEM; | ||
975 | |||
976 | trace_seq_init(s); | ||
977 | trace_seq_printf(s, "%d\n", call->event.type); | ||
978 | |||
979 | r = simple_read_from_buffer(ubuf, cnt, ppos, | ||
980 | s->buffer, s->len); | ||
981 | kfree(s); | ||
982 | return r; | ||
983 | } | 958 | } |
984 | 959 | ||
985 | static ssize_t | 960 | static ssize_t |
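
event_id_read() previously heap-allocated an entire trace_seq just to format one integer; a 32-byte stack buffer is ample for "%d\n", and simple_read_from_buffer() handles copy_to_user(), short reads, and *ppos advancement. The same shape suits any small, fixed-size, read-only attribute; a hedged sketch with a hypothetical value:

    #include <linux/fs.h>
    #include <linux/kernel.h>

    /* Generic small-attribute read (sketch): format into a stack buffer,
     * then let simple_read_from_buffer() do the user copy and *ppos math.
     * "my_value" is a stand-in for whatever the file exposes. */
    static ssize_t my_id_read(struct file *filp, char __user *ubuf,
                              size_t cnt, loff_t *ppos)
    {
            int my_value = 42;      /* hypothetical datum */
            char buf[32];           /* ample for "%d\n" plus the NUL */
            int len;

            len = snprintf(buf, sizeof(buf), "%d\n", my_value);
            return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
    }
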
@@ -1218,6 +1193,7 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | |||
1218 | 1193 | ||
1219 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); | 1194 | static int ftrace_event_avail_open(struct inode *inode, struct file *file); |
1220 | static int ftrace_event_set_open(struct inode *inode, struct file *file); | 1195 | static int ftrace_event_set_open(struct inode *inode, struct file *file); |
1196 | static int ftrace_event_release(struct inode *inode, struct file *file); | ||
1221 | 1197 | ||
1222 | static const struct seq_operations show_event_seq_ops = { | 1198 | static const struct seq_operations show_event_seq_ops = { |
1223 | .start = t_start, | 1199 | .start = t_start, |
@@ -1245,7 +1221,7 @@ static const struct file_operations ftrace_set_event_fops = { | |||
1245 | .read = seq_read, | 1221 | .read = seq_read, |
1246 | .write = ftrace_event_write, | 1222 | .write = ftrace_event_write, |
1247 | .llseek = seq_lseek, | 1223 | .llseek = seq_lseek, |
1248 | .release = seq_release, | 1224 | .release = ftrace_event_release, |
1249 | }; | 1225 | }; |
1250 | 1226 | ||
1251 | static const struct file_operations ftrace_enable_fops = { | 1227 | static const struct file_operations ftrace_enable_fops = { |
@@ -1323,6 +1299,15 @@ ftrace_event_open(struct inode *inode, struct file *file, | |||
1323 | return ret; | 1299 | return ret; |
1324 | } | 1300 | } |
1325 | 1301 | ||
1302 | static int ftrace_event_release(struct inode *inode, struct file *file) | ||
1303 | { | ||
1304 | struct trace_array *tr = inode->i_private; | ||
1305 | |||
1306 | trace_array_put(tr); | ||
1307 | |||
1308 | return seq_release(inode, file); | ||
1309 | } | ||
1310 | |||
1326 | static int | 1311 | static int |
1327 | ftrace_event_avail_open(struct inode *inode, struct file *file) | 1312 | ftrace_event_avail_open(struct inode *inode, struct file *file) |
1328 | { | 1313 | { |
@@ -1336,12 +1321,19 @@ ftrace_event_set_open(struct inode *inode, struct file *file) | |||
1336 | { | 1321 | { |
1337 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; | 1322 | const struct seq_operations *seq_ops = &show_set_event_seq_ops; |
1338 | struct trace_array *tr = inode->i_private; | 1323 | struct trace_array *tr = inode->i_private; |
1324 | int ret; | ||
1325 | |||
1326 | if (trace_array_get(tr) < 0) | ||
1327 | return -ENODEV; | ||
1339 | 1328 | ||
1340 | if ((file->f_mode & FMODE_WRITE) && | 1329 | if ((file->f_mode & FMODE_WRITE) && |
1341 | (file->f_flags & O_TRUNC)) | 1330 | (file->f_flags & O_TRUNC)) |
1342 | ftrace_clear_events(tr); | 1331 | ftrace_clear_events(tr); |
1343 | 1332 | ||
1344 | return ftrace_event_open(inode, file, seq_ops); | 1333 | ret = ftrace_event_open(inode, file, seq_ops); |
1334 | if (ret < 0) | ||
1335 | trace_array_put(tr); | ||
1336 | return ret; | ||
1345 | } | 1337 | } |
1346 | 1338 | ||
1347 | static struct event_subsystem * | 1339 | static struct event_subsystem * |
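
The set_event changes above follow one rule: every successful open pins the trace_array once (trace_array_get() fails if the instance is being torn down), and exactly one trace_array_put() runs on every path, in the open itself on error or in the new ftrace_event_release() on normal close. A sketch of the pairing (my_seq_ops is hypothetical; trace_array_get/put are declared in kernel/trace/trace.h):

    static int my_open(struct inode *inode, struct file *file)
    {
            struct trace_array *tr = inode->i_private;
            int ret;

            if (trace_array_get(tr) < 0)
                    return -ENODEV;         /* instance is going away */

            ret = seq_open(file, &my_seq_ops);
            if (ret < 0)
                    trace_array_put(tr);    /* error path: drop our reference */
            return ret;
    }

    static int my_release(struct inode *inode, struct file *file)
    {
            trace_array_put(inode->i_private);      /* success path: drop it here */
            return seq_release(inode, file);
    }
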
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 0d883dc057d6..0c7b75a8acc8 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -646,7 +646,7 @@ void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) | |||
646 | if (filter && filter->filter_string) | 646 | if (filter && filter->filter_string) |
647 | trace_seq_printf(s, "%s\n", filter->filter_string); | 647 | trace_seq_printf(s, "%s\n", filter->filter_string); |
648 | else | 648 | else |
649 | trace_seq_printf(s, "none\n"); | 649 | trace_seq_puts(s, "none\n"); |
650 | mutex_unlock(&event_mutex); | 650 | mutex_unlock(&event_mutex); |
651 | } | 651 | } |
652 | 652 | ||
@@ -660,7 +660,7 @@ void print_subsystem_event_filter(struct event_subsystem *system, | |||
660 | if (filter && filter->filter_string) | 660 | if (filter && filter->filter_string) |
661 | trace_seq_printf(s, "%s\n", filter->filter_string); | 661 | trace_seq_printf(s, "%s\n", filter->filter_string); |
662 | else | 662 | else |
663 | trace_seq_printf(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); | 663 | trace_seq_puts(s, DEFAULT_SYS_FILTER_MESSAGE "\n"); |
664 | mutex_unlock(&event_mutex); | 664 | mutex_unlock(&event_mutex); |
665 | } | 665 | } |
666 | 666 | ||
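
The trace_seq conversions here and in the files that follow are mechanical but worthwhile: trace_seq_printf() runs every string through the vsnprintf() machinery even when no format specifier is present, while trace_seq_puts()/trace_seq_putc() copy straight into the seq buffer. Moving "%s" output to puts also removes any chance of a stray '%' in the data being parsed as a format. The rule in code form (sketch; the real call sites also check the return value and bail with TRACE_TYPE_PARTIAL_LINE):

    #include <linux/trace_seq.h>

    static void emit_row(struct trace_seq *s, int depth, const char *msg)
    {
            int i;

            for (i = 0; i < depth; i++)
                    trace_seq_putc(s, ' '); /* was: trace_seq_printf(s, " ") */
            trace_seq_puts(s, msg);         /* was: trace_seq_printf(s, "%s", msg) */
            trace_seq_putc(s, '\n');        /* was: trace_seq_printf(s, "\n") */
    }
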
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index b863f93b30f3..38fe1483c508 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -199,7 +199,7 @@ static int func_set_flag(u32 old_flags, u32 bit, int set) | |||
199 | return 0; | 199 | return 0; |
200 | } | 200 | } |
201 | 201 | ||
202 | static struct tracer function_trace __read_mostly = | 202 | static struct tracer function_trace __tracer_data = |
203 | { | 203 | { |
204 | .name = "function", | 204 | .name = "function", |
205 | .init = function_trace_init, | 205 | .init = function_trace_init, |
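
function_trace here (and graph_trace further down) moves from __read_mostly to __tracer_data. The point, as I read it, is that the ftrace startup self-tests write to the tracer structures at boot, so the structs may only live in the read-mostly section when the tests are compiled out; the macro presumably expands along these lines (assumption, reconstructed from the usage rather than quoted from trace.h):

    /* Assumed definition (hedged): keep tracer structs plain writable data
     * when the startup self-tests, which modify them at boot, are enabled. */
    #ifdef CONFIG_FTRACE_STARTUP_TEST
    #define __tracer_data
    #else
    #define __tracer_data   __read_mostly
    #endif
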
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 8388bc99f2ee..b5c09242683d 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -446,7 +446,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
446 | 446 | ||
447 | /* First spaces to align center */ | 447 | /* First spaces to align center */ |
448 | for (i = 0; i < spaces / 2; i++) { | 448 | for (i = 0; i < spaces / 2; i++) { |
449 | ret = trace_seq_printf(s, " "); | 449 | ret = trace_seq_putc(s, ' '); |
450 | if (!ret) | 450 | if (!ret) |
451 | return TRACE_TYPE_PARTIAL_LINE; | 451 | return TRACE_TYPE_PARTIAL_LINE; |
452 | } | 452 | } |
@@ -457,7 +457,7 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
457 | 457 | ||
458 | /* Last spaces to align center */ | 458 | /* Last spaces to align center */ |
459 | for (i = 0; i < spaces - (spaces / 2); i++) { | 459 | for (i = 0; i < spaces - (spaces / 2); i++) { |
460 | ret = trace_seq_printf(s, " "); | 460 | ret = trace_seq_putc(s, ' '); |
461 | if (!ret) | 461 | if (!ret) |
462 | return TRACE_TYPE_PARTIAL_LINE; | 462 | return TRACE_TYPE_PARTIAL_LINE; |
463 | } | 463 | } |
@@ -503,7 +503,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
503 | ------------------------------------------ | 503 | ------------------------------------------ |
504 | 504 | ||
505 | */ | 505 | */ |
506 | ret = trace_seq_printf(s, | 506 | ret = trace_seq_puts(s, |
507 | " ------------------------------------------\n"); | 507 | " ------------------------------------------\n"); |
508 | if (!ret) | 508 | if (!ret) |
509 | return TRACE_TYPE_PARTIAL_LINE; | 509 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -516,7 +516,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
516 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 516 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
517 | return TRACE_TYPE_PARTIAL_LINE; | 517 | return TRACE_TYPE_PARTIAL_LINE; |
518 | 518 | ||
519 | ret = trace_seq_printf(s, " => "); | 519 | ret = trace_seq_puts(s, " => "); |
520 | if (!ret) | 520 | if (!ret) |
521 | return TRACE_TYPE_PARTIAL_LINE; | 521 | return TRACE_TYPE_PARTIAL_LINE; |
522 | 522 | ||
@@ -524,7 +524,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
524 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 524 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
525 | return TRACE_TYPE_PARTIAL_LINE; | 525 | return TRACE_TYPE_PARTIAL_LINE; |
526 | 526 | ||
527 | ret = trace_seq_printf(s, | 527 | ret = trace_seq_puts(s, |
528 | "\n ------------------------------------------\n\n"); | 528 | "\n ------------------------------------------\n\n"); |
529 | if (!ret) | 529 | if (!ret) |
530 | return TRACE_TYPE_PARTIAL_LINE; | 530 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -645,7 +645,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
645 | ret = print_graph_proc(s, pid); | 645 | ret = print_graph_proc(s, pid); |
646 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 646 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
647 | return TRACE_TYPE_PARTIAL_LINE; | 647 | return TRACE_TYPE_PARTIAL_LINE; |
648 | ret = trace_seq_printf(s, " | "); | 648 | ret = trace_seq_puts(s, " | "); |
649 | if (!ret) | 649 | if (!ret) |
650 | return TRACE_TYPE_PARTIAL_LINE; | 650 | return TRACE_TYPE_PARTIAL_LINE; |
651 | } | 651 | } |
@@ -657,9 +657,9 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
657 | return ret; | 657 | return ret; |
658 | 658 | ||
659 | if (type == TRACE_GRAPH_ENT) | 659 | if (type == TRACE_GRAPH_ENT) |
660 | ret = trace_seq_printf(s, "==========>"); | 660 | ret = trace_seq_puts(s, "==========>"); |
661 | else | 661 | else |
662 | ret = trace_seq_printf(s, "<=========="); | 662 | ret = trace_seq_puts(s, "<=========="); |
663 | 663 | ||
664 | if (!ret) | 664 | if (!ret) |
665 | return TRACE_TYPE_PARTIAL_LINE; | 665 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -668,7 +668,7 @@ print_graph_irq(struct trace_iterator *iter, unsigned long addr, | |||
668 | if (ret != TRACE_TYPE_HANDLED) | 668 | if (ret != TRACE_TYPE_HANDLED) |
669 | return ret; | 669 | return ret; |
670 | 670 | ||
671 | ret = trace_seq_printf(s, "\n"); | 671 | ret = trace_seq_putc(s, '\n'); |
672 | 672 | ||
673 | if (!ret) | 673 | if (!ret) |
674 | return TRACE_TYPE_PARTIAL_LINE; | 674 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -705,13 +705,13 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
705 | len += strlen(nsecs_str); | 705 | len += strlen(nsecs_str); |
706 | } | 706 | } |
707 | 707 | ||
708 | ret = trace_seq_printf(s, " us "); | 708 | ret = trace_seq_puts(s, " us "); |
709 | if (!ret) | 709 | if (!ret) |
710 | return TRACE_TYPE_PARTIAL_LINE; | 710 | return TRACE_TYPE_PARTIAL_LINE; |
711 | 711 | ||
712 | /* Print remaining spaces to fit the row's width */ | 712 | /* Print remaining spaces to fit the row's width */ |
713 | for (i = len; i < 7; i++) { | 713 | for (i = len; i < 7; i++) { |
714 | ret = trace_seq_printf(s, " "); | 714 | ret = trace_seq_putc(s, ' '); |
715 | if (!ret) | 715 | if (!ret) |
716 | return TRACE_TYPE_PARTIAL_LINE; | 716 | return TRACE_TYPE_PARTIAL_LINE; |
717 | } | 717 | } |
@@ -731,13 +731,13 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
731 | /* No real data, just filling the column with spaces */ | 731 |
732 | switch (duration) { | 732 | switch (duration) { |
733 | case DURATION_FILL_FULL: | 733 | case DURATION_FILL_FULL: |
734 | ret = trace_seq_printf(s, " | "); | 734 | ret = trace_seq_puts(s, " | "); |
735 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 735 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
736 | case DURATION_FILL_START: | 736 | case DURATION_FILL_START: |
737 | ret = trace_seq_printf(s, " "); | 737 | ret = trace_seq_puts(s, " "); |
738 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 738 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
739 | case DURATION_FILL_END: | 739 | case DURATION_FILL_END: |
740 | ret = trace_seq_printf(s, " |"); | 740 | ret = trace_seq_puts(s, " |"); |
741 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 741 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; |
742 | } | 742 | } |
743 | 743 | ||
@@ -745,10 +745,10 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
745 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | 745 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { |
746 | /* Duration exceeded 100 msecs */ | 746 | /* Duration exceeded 100 msecs */ |
747 | if (duration > 100000ULL) | 747 | if (duration > 100000ULL) |
748 | ret = trace_seq_printf(s, "! "); | 748 | ret = trace_seq_puts(s, "! "); |
749 | /* Duration exceeded 10 msecs */ | 749 | /* Duration exceeded 10 msecs */ |
750 | else if (duration > 10000ULL) | 750 | else if (duration > 10000ULL) |
751 | ret = trace_seq_printf(s, "+ "); | 751 | ret = trace_seq_puts(s, "+ "); |
752 | } | 752 | } |
753 | 753 | ||
754 | /* | 754 | /* |
@@ -757,7 +757,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
757 | * to fill out the space. | 757 | * to fill out the space. |
758 | */ | 758 | */ |
759 | if (ret == -1) | 759 | if (ret == -1) |
760 | ret = trace_seq_printf(s, " "); | 760 | ret = trace_seq_puts(s, " "); |
761 | 761 | ||
762 | /* Catch any failure that happened above */ | 762 |
763 | if (!ret) | 763 | if (!ret) |
@@ -767,7 +767,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s, | |||
767 | if (ret != TRACE_TYPE_HANDLED) | 767 | if (ret != TRACE_TYPE_HANDLED) |
768 | return ret; | 768 | return ret; |
769 | 769 | ||
770 | ret = trace_seq_printf(s, "| "); | 770 | ret = trace_seq_puts(s, "| "); |
771 | if (!ret) | 771 | if (!ret) |
772 | return TRACE_TYPE_PARTIAL_LINE; | 772 | return TRACE_TYPE_PARTIAL_LINE; |
773 | 773 | ||
@@ -817,7 +817,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
817 | 817 | ||
818 | /* Function */ | 818 | /* Function */ |
819 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 819 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
820 | ret = trace_seq_printf(s, " "); | 820 | ret = trace_seq_putc(s, ' '); |
821 | if (!ret) | 821 | if (!ret) |
822 | return TRACE_TYPE_PARTIAL_LINE; | 822 | return TRACE_TYPE_PARTIAL_LINE; |
823 | } | 823 | } |
@@ -858,7 +858,7 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
858 | 858 | ||
859 | /* Function */ | 859 | /* Function */ |
860 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 860 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
861 | ret = trace_seq_printf(s, " "); | 861 | ret = trace_seq_putc(s, ' '); |
862 | if (!ret) | 862 | if (!ret) |
863 | return TRACE_TYPE_PARTIAL_LINE; | 863 | return TRACE_TYPE_PARTIAL_LINE; |
864 | } | 864 | } |
@@ -917,7 +917,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | |||
917 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 917 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
918 | return TRACE_TYPE_PARTIAL_LINE; | 918 | return TRACE_TYPE_PARTIAL_LINE; |
919 | 919 | ||
920 | ret = trace_seq_printf(s, " | "); | 920 | ret = trace_seq_puts(s, " | "); |
921 | if (!ret) | 921 | if (!ret) |
922 | return TRACE_TYPE_PARTIAL_LINE; | 922 | return TRACE_TYPE_PARTIAL_LINE; |
923 | } | 923 | } |
@@ -1117,7 +1117,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1117 | 1117 | ||
1118 | /* Closing brace */ | 1118 | /* Closing brace */ |
1119 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1119 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
1120 | ret = trace_seq_printf(s, " "); | 1120 | ret = trace_seq_putc(s, ' '); |
1121 | if (!ret) | 1121 | if (!ret) |
1122 | return TRACE_TYPE_PARTIAL_LINE; | 1122 | return TRACE_TYPE_PARTIAL_LINE; |
1123 | } | 1123 | } |
@@ -1129,7 +1129,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
1129 | * belongs to, write out the function name. | 1129 | * belongs to, write out the function name. |
1130 | */ | 1130 | */ |
1131 | if (func_match) { | 1131 | if (func_match) { |
1132 | ret = trace_seq_printf(s, "}\n"); | 1132 | ret = trace_seq_puts(s, "}\n"); |
1133 | if (!ret) | 1133 | if (!ret) |
1134 | return TRACE_TYPE_PARTIAL_LINE; | 1134 | return TRACE_TYPE_PARTIAL_LINE; |
1135 | } else { | 1135 | } else { |
@@ -1179,13 +1179,13 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1179 | /* Indentation */ | 1179 | /* Indentation */ |
1180 | if (depth > 0) | 1180 | if (depth > 0) |
1181 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 1181 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { |
1182 | ret = trace_seq_printf(s, " "); | 1182 | ret = trace_seq_putc(s, ' '); |
1183 | if (!ret) | 1183 | if (!ret) |
1184 | return TRACE_TYPE_PARTIAL_LINE; | 1184 | return TRACE_TYPE_PARTIAL_LINE; |
1185 | } | 1185 | } |
1186 | 1186 | ||
1187 | /* The comment */ | 1187 | /* The comment */ |
1188 | ret = trace_seq_printf(s, "/* "); | 1188 | ret = trace_seq_puts(s, "/* "); |
1189 | if (!ret) | 1189 | if (!ret) |
1190 | return TRACE_TYPE_PARTIAL_LINE; | 1190 | return TRACE_TYPE_PARTIAL_LINE; |
1191 | 1191 | ||
@@ -1216,7 +1216,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
1216 | s->len--; | 1216 | s->len--; |
1217 | } | 1217 | } |
1218 | 1218 | ||
1219 | ret = trace_seq_printf(s, " */\n"); | 1219 | ret = trace_seq_puts(s, " */\n"); |
1220 | if (!ret) | 1220 | if (!ret) |
1221 | return TRACE_TYPE_PARTIAL_LINE; | 1221 | return TRACE_TYPE_PARTIAL_LINE; |
1222 | 1222 | ||
@@ -1448,7 +1448,7 @@ static struct trace_event graph_trace_ret_event = { | |||
1448 | .funcs = &graph_functions | 1448 | .funcs = &graph_functions |
1449 | }; | 1449 | }; |
1450 | 1450 | ||
1451 | static struct tracer graph_trace __read_mostly = { | 1451 | static struct tracer graph_trace __tracer_data = { |
1452 | .name = "function_graph", | 1452 | .name = "function_graph", |
1453 | .open = graph_trace_open, | 1453 | .open = graph_trace_open, |
1454 | .pipe_open = graph_trace_open, | 1454 | .pipe_open = graph_trace_open, |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 7ed6976493c8..3811487e7a7a 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -243,11 +243,11 @@ find_event_file_link(struct trace_probe *tp, struct ftrace_event_file *file) | |||
243 | static int | 243 | static int |
244 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | 244 | disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) |
245 | { | 245 | { |
246 | struct event_file_link *link = NULL; | ||
247 | int wait = 0; | ||
246 | int ret = 0; | 248 | int ret = 0; |
247 | 249 | ||
248 | if (file) { | 250 | if (file) { |
249 | struct event_file_link *link; | ||
250 | |||
251 | link = find_event_file_link(tp, file); | 251 | link = find_event_file_link(tp, file); |
252 | if (!link) { | 252 | if (!link) { |
253 | ret = -EINVAL; | 253 | ret = -EINVAL; |
@@ -255,10 +255,7 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
255 | } | 255 | } |
256 | 256 | ||
257 | list_del_rcu(&link->list); | 257 | list_del_rcu(&link->list); |
258 | /* synchronize with kprobe_trace_func/kretprobe_trace_func */ | 258 | wait = 1; |
259 | synchronize_sched(); | ||
260 | kfree(link); | ||
261 | |||
262 | if (!list_empty(&tp->files)) | 259 | if (!list_empty(&tp->files)) |
263 | goto out; | 260 | goto out; |
264 | 261 | ||
@@ -271,8 +268,22 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file) | |||
271 | disable_kretprobe(&tp->rp); | 268 | disable_kretprobe(&tp->rp); |
272 | else | 269 | else |
273 | disable_kprobe(&tp->rp.kp); | 270 | disable_kprobe(&tp->rp.kp); |
271 | wait = 1; | ||
274 | } | 272 | } |
275 | out: | 273 | out: |
274 | if (wait) { | ||
275 | /* | ||
276 | * Synchronize with kprobe_trace_func/kretprobe_trace_func | ||
277 | * to ensure the probe is disabled (all running handlers are finished). | ||
278 | * This is not only for kfree(); the caller, | ||
279 | * trace_remove_event_call(), also relies on it when releasing | ||
280 | * event_call related objects, which will be accessed in | ||
281 | * the kprobe_trace_func/kretprobe_trace_func. | ||
282 | */ | ||
283 | synchronize_sched(); | ||
284 | kfree(link); /* Ignored if link == NULL */ | ||
285 | } | ||
286 | |||
276 | return ret; | 287 | return ret; |
277 | } | 288 | } |
278 | 289 | ||
@@ -1087,9 +1098,6 @@ kprobe_perf_func(struct trace_probe *tp, struct pt_regs *regs) | |||
1087 | __size = sizeof(*entry) + tp->size + dsize; | 1098 | __size = sizeof(*entry) + tp->size + dsize; |
1088 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1099 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1089 | size -= sizeof(u32); | 1100 | size -= sizeof(u32); |
1090 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
1091 | "profile buffer not large enough")) | ||
1092 | return; | ||
1093 | 1101 | ||
1094 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); | 1102 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); |
1095 | if (!entry) | 1103 | if (!entry) |
@@ -1120,9 +1128,6 @@ kretprobe_perf_func(struct trace_probe *tp, struct kretprobe_instance *ri, | |||
1120 | __size = sizeof(*entry) + tp->size + dsize; | 1128 | __size = sizeof(*entry) + tp->size + dsize; |
1121 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1129 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1122 | size -= sizeof(u32); | 1130 | size -= sizeof(u32); |
1123 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
1124 | "profile buffer not large enough")) | ||
1125 | return; | ||
1126 | 1131 | ||
1127 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); | 1132 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); |
1128 | if (!entry) | 1133 | if (!entry) |
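
The WARN_ONCE size checks disappear from kprobe_perf_func()/kretprobe_perf_func() here, and from the syscall and uprobe paths below, because the check is centralized in perf_trace_buf_prepare(), which can refuse an oversized event by returning NULL; every caller already bails out on a NULL buffer. The centralized check plausibly takes this shape (hedged reconstruction from the callers, not the actual body of the function):

    void *perf_trace_buf_prepare(int size, unsigned short type,
                                 struct pt_regs *regs, int *rctx)
    {
            if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
                          "perf buffer not large enough"))
                    return NULL;    /* callers treat NULL as "drop this event" */

            /* ... recursion check and per-cpu buffer lookup elided ... */
            return NULL;            /* placeholder for the elided tail */
    }
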
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index a5e8f4878bfa..b3dcfb2f0fef 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -90,7 +90,7 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | |||
90 | if (drv) | 90 | if (drv) |
91 | ret += trace_seq_printf(s, " %s\n", drv->name); | 91 | ret += trace_seq_printf(s, " %s\n", drv->name); |
92 | else | 92 | else |
93 | ret += trace_seq_printf(s, " \n"); | 93 | ret += trace_seq_puts(s, " \n"); |
94 | return ret; | 94 | return ret; |
95 | } | 95 | } |
96 | 96 | ||
@@ -107,7 +107,7 @@ static void mmio_pipe_open(struct trace_iterator *iter) | |||
107 | struct header_iter *hiter; | 107 | struct header_iter *hiter; |
108 | struct trace_seq *s = &iter->seq; | 108 | struct trace_seq *s = &iter->seq; |
109 | 109 | ||
110 | trace_seq_printf(s, "VERSION 20070824\n"); | 110 | trace_seq_puts(s, "VERSION 20070824\n"); |
111 | 111 | ||
112 | hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); | 112 | hiter = kzalloc(sizeof(*hiter), GFP_KERNEL); |
113 | if (!hiter) | 113 | if (!hiter) |
@@ -209,7 +209,7 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
209 | (rw->value >> 0) & 0xff, rw->pc, 0); | 209 | (rw->value >> 0) & 0xff, rw->pc, 0); |
210 | break; | 210 | break; |
211 | default: | 211 | default: |
212 | ret = trace_seq_printf(s, "rw what?\n"); | 212 | ret = trace_seq_puts(s, "rw what?\n"); |
213 | break; | 213 | break; |
214 | } | 214 | } |
215 | if (ret) | 215 | if (ret) |
@@ -245,7 +245,7 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
245 | secs, usec_rem, m->map_id, 0UL, 0); | 245 | secs, usec_rem, m->map_id, 0UL, 0); |
246 | break; | 246 | break; |
247 | default: | 247 | default: |
248 | ret = trace_seq_printf(s, "map what?\n"); | 248 | ret = trace_seq_puts(s, "map what?\n"); |
249 | break; | 249 | break; |
250 | } | 250 | } |
251 | if (ret) | 251 | if (ret) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index bb922d9ee51b..34e7cbac0c9c 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -78,7 +78,7 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
78 | 78 | ||
79 | trace_assign_type(field, entry); | 79 | trace_assign_type(field, entry); |
80 | 80 | ||
81 | ret = trace_seq_printf(s, "%s", field->buf); | 81 | ret = trace_seq_puts(s, field->buf); |
82 | if (!ret) | 82 | if (!ret) |
83 | return TRACE_TYPE_PARTIAL_LINE; | 83 | return TRACE_TYPE_PARTIAL_LINE; |
84 | 84 | ||
@@ -558,14 +558,14 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
558 | if (ret) | 558 | if (ret) |
559 | ret = trace_seq_puts(s, "??"); | 559 | ret = trace_seq_puts(s, "??"); |
560 | if (ret) | 560 | if (ret) |
561 | ret = trace_seq_puts(s, "\n"); | 561 | ret = trace_seq_putc(s, '\n'); |
562 | continue; | 562 | continue; |
563 | } | 563 | } |
564 | if (!ret) | 564 | if (!ret) |
565 | break; | 565 | break; |
566 | if (ret) | 566 | if (ret) |
567 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | 567 | ret = seq_print_user_ip(s, mm, ip, sym_flags); |
568 | ret = trace_seq_puts(s, "\n"); | 568 | ret = trace_seq_putc(s, '\n'); |
569 | } | 569 | } |
570 | 570 | ||
571 | if (mm) | 571 | if (mm) |
@@ -579,7 +579,7 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | |||
579 | int ret; | 579 | int ret; |
580 | 580 | ||
581 | if (!ip) | 581 | if (!ip) |
582 | return trace_seq_printf(s, "0"); | 582 | return trace_seq_putc(s, '0'); |
583 | 583 | ||
584 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | 584 | if (sym_flags & TRACE_ITER_SYM_OFFSET) |
585 | ret = seq_print_sym_offset(s, "%s", ip); | 585 | ret = seq_print_sym_offset(s, "%s", ip); |
@@ -964,14 +964,14 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags, | |||
964 | goto partial; | 964 | goto partial; |
965 | 965 | ||
966 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { | 966 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { |
967 | if (!trace_seq_printf(s, " <-")) | 967 | if (!trace_seq_puts(s, " <-")) |
968 | goto partial; | 968 | goto partial; |
969 | if (!seq_print_ip_sym(s, | 969 | if (!seq_print_ip_sym(s, |
970 | field->parent_ip, | 970 | field->parent_ip, |
971 | flags)) | 971 | flags)) |
972 | goto partial; | 972 | goto partial; |
973 | } | 973 | } |
974 | if (!trace_seq_printf(s, "\n")) | 974 | if (!trace_seq_putc(s, '\n')) |
975 | goto partial; | 975 | goto partial; |
976 | 976 | ||
977 | return TRACE_TYPE_HANDLED; | 977 | return TRACE_TYPE_HANDLED; |
@@ -1210,7 +1210,7 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
1210 | 1210 | ||
1211 | if (!seq_print_ip_sym(s, *p, flags)) | 1211 | if (!seq_print_ip_sym(s, *p, flags)) |
1212 | goto partial; | 1212 | goto partial; |
1213 | if (!trace_seq_puts(s, "\n")) | 1213 | if (!trace_seq_putc(s, '\n')) |
1214 | goto partial; | 1214 | goto partial; |
1215 | } | 1215 | } |
1216 | 1216 | ||
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 322e16461072..8fd03657bc7d 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -175,7 +175,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
175 | entry = syscall_nr_to_meta(syscall); | 175 | entry = syscall_nr_to_meta(syscall); |
176 | 176 | ||
177 | if (!entry) { | 177 | if (!entry) { |
178 | trace_seq_printf(s, "\n"); | 178 | trace_seq_putc(s, '\n'); |
179 | return TRACE_TYPE_HANDLED; | 179 | return TRACE_TYPE_HANDLED; |
180 | } | 180 | } |
181 | 181 | ||
@@ -566,15 +566,15 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
566 | if (!sys_data) | 566 | if (!sys_data) |
567 | return; | 567 | return; |
568 | 568 | ||
569 | head = this_cpu_ptr(sys_data->enter_event->perf_events); | ||
570 | if (hlist_empty(head)) | ||
571 | return; | ||
572 | |||
569 | /* get the size after alignment with the u32 buffer size field */ | 573 | /* get the size after alignment with the u32 buffer size field */ |
570 | size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); | 574 | size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec); |
571 | size = ALIGN(size + sizeof(u32), sizeof(u64)); | 575 | size = ALIGN(size + sizeof(u32), sizeof(u64)); |
572 | size -= sizeof(u32); | 576 | size -= sizeof(u32); |
573 | 577 | ||
574 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
575 | "perf buffer not large enough")) | ||
576 | return; | ||
577 | |||
578 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, | 578 | rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, |
579 | sys_data->enter_event->event.type, regs, &rctx); | 579 | sys_data->enter_event->event.type, regs, &rctx); |
580 | if (!rec) | 580 | if (!rec) |
@@ -583,8 +583,6 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
583 | rec->nr = syscall_nr; | 583 | rec->nr = syscall_nr; |
584 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, | 584 | syscall_get_arguments(current, regs, 0, sys_data->nb_args, |
585 | (unsigned long *)&rec->args); | 585 | (unsigned long *)&rec->args); |
586 | |||
587 | head = this_cpu_ptr(sys_data->enter_event->perf_events); | ||
588 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 586 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
589 | } | 587 | } |
590 | 588 | ||
@@ -642,18 +640,14 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
642 | if (!sys_data) | 640 | if (!sys_data) |
643 | return; | 641 | return; |
644 | 642 | ||
643 | head = this_cpu_ptr(sys_data->exit_event->perf_events); | ||
644 | if (hlist_empty(head)) | ||
645 | return; | ||
646 | |||
645 | /* We can probably do that at build time */ | 647 | /* We can probably do that at build time */ |
646 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); | 648 | size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64)); |
647 | size -= sizeof(u32); | 649 | size -= sizeof(u32); |
648 | 650 | ||
649 | /* | ||
650 | * Impossible, but be paranoid with the future | ||
651 | * How to put this check outside runtime? | ||
652 | */ | ||
653 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | ||
654 | "exit event has grown above perf buffer size")) | ||
655 | return; | ||
656 | |||
657 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, | 651 | rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, |
658 | sys_data->exit_event->event.type, regs, &rctx); | 652 | sys_data->exit_event->event.type, regs, &rctx); |
659 | if (!rec) | 653 | if (!rec) |
@@ -661,8 +655,6 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
661 | 655 | ||
662 | rec->nr = syscall_nr; | 656 | rec->nr = syscall_nr; |
663 | rec->ret = syscall_get_return_value(current, regs); | 657 | rec->ret = syscall_get_return_value(current, regs); |
664 | |||
665 | head = this_cpu_ptr(sys_data->exit_event->perf_events); | ||
666 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); | 658 | perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); |
667 | } | 659 | } |
668 | 660 | ||
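
Both perf_syscall_enter() and perf_syscall_exit() now test the per-cpu perf_events list first thing: when no perf consumer is attached to the event, the function returns before computing sizes or preparing a buffer, and the head pointer is then reused for the final submit instead of being looked up twice. The shape of the optimization (sketch; my_sys_data is a hypothetical stand-in for the syscall metadata, and the elided middle is the existing buffer-prepare/fill code):

    #include <linux/ftrace_event.h>
    #include <linux/percpu.h>

    struct my_sys_data {
            struct ftrace_event_call *enter_event;
    };

    static void perf_hook(struct my_sys_data *sys_data, struct pt_regs *regs)
    {
            struct hlist_head *head;

            head = this_cpu_ptr(sys_data->enter_event->perf_events);
            if (hlist_empty(head))
                    return;         /* nobody listening: the zero-cost path */

            /* ... size computation, perf_trace_buf_prepare(), record fill ... */
            /* perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); */
    }
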
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index d5d0cd368a56..a23d2d71188e 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -818,8 +818,6 @@ static void uprobe_perf_print(struct trace_uprobe *tu, | |||
818 | 818 | ||
819 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); | 819 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
820 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); | 820 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
821 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | ||
822 | return; | ||
823 | 821 | ||
824 | preempt_disable(); | 822 | preempt_disable(); |
825 | head = this_cpu_ptr(call->perf_events); | 823 | head = this_cpu_ptr(call->perf_events); |
diff --git a/kernel/wait.c b/kernel/wait.c index ce0daa320a26..dec68bd4e9d8 100644 --- a/kernel/wait.c +++ b/kernel/wait.c | |||
@@ -333,7 +333,8 @@ int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q, | |||
333 | prepare_to_wait(wq, &q->wait, mode); | 333 | prepare_to_wait(wq, &q->wait, mode); |
334 | val = q->key.flags; | 334 | val = q->key.flags; |
335 | if (atomic_read(val) == 0) | 335 | if (atomic_read(val) == 0) |
336 | ret = (*action)(val); | 336 | break; |
337 | ret = (*action)(val); | ||
337 | } while (!ret && atomic_read(val) != 0); | 338 | } while (!ret && atomic_read(val) != 0); |
338 | finish_wait(wq, &q->wait); | 339 | finish_wait(wq, &q->wait); |
339 | return ret; | 340 | return ret; |
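
The small change to __wait_on_atomic_t() fixes an inverted test: before, a non-zero counter left ret at 0 and the loop spun through prepare_to_wait() without ever calling the action (i.e. without sleeping), while a counter already at zero pointlessly invoked the action once. After the fix the loop breaks as soon as the count reaches zero and otherwise sleeps via the action. Annotated restatement of the fixed loop:

    int __wait_on_atomic_t(wait_queue_head_t *wq, struct wait_bit_queue *q,
                           int (*action)(atomic_t *), unsigned mode)
    {
            atomic_t *val;
            int ret = 0;

            do {
                    prepare_to_wait(wq, &q->wait, mode);
                    val = q->key.flags;
                    if (atomic_read(val) == 0)
                            break;          /* count already zero: done, no sleep */
                    ret = (*action)(val);   /* typically sleeps; may return -EINTR */
            } while (!ret && atomic_read(val) != 0);
            finish_wait(wq, &q->wait);
            return ret;
    }
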
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f02c4a4a0c3c..0b72e816b8d0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -4644,7 +4644,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) | |||
4644 | * Workqueues should be brought up before normal priority CPU notifiers. | 4644 | * Workqueues should be brought up before normal priority CPU notifiers. |
4645 | * This will be registered high priority CPU notifier. | 4645 | * This will be registered high priority CPU notifier. |
4646 | */ | 4646 | */ |
4647 | static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, | 4647 | static int workqueue_cpu_up_callback(struct notifier_block *nfb, |
4648 | unsigned long action, | 4648 | unsigned long action, |
4649 | void *hcpu) | 4649 | void *hcpu) |
4650 | { | 4650 | { |
@@ -4697,7 +4697,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, | |||
4697 | * Workqueues should be brought down after normal priority CPU notifiers. | 4697 | * Workqueues should be brought down after normal priority CPU notifiers. |
4698 | * This will be registered as low priority CPU notifier. | 4698 | * This will be registered as low priority CPU notifier. |
4699 | */ | 4699 | */ |
4700 | static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, | 4700 | static int workqueue_cpu_down_callback(struct notifier_block *nfb, |
4701 | unsigned long action, | 4701 | unsigned long action, |
4702 | void *hcpu) | 4702 | void *hcpu) |
4703 | { | 4703 | { |