Diffstat (limited to 'kernel')
33 files changed, 1451 insertions, 2925 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 057472fbc272..ce53fb2bd1d9 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -76,8 +76,8 @@ obj-$(CONFIG_GCOV_KERNEL) += gcov/
 obj-$(CONFIG_AUDIT_TREE) += audit_tree.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_KGDB) += debug/
-obj-$(CONFIG_DETECT_SOFTLOCKUP) += softlockup.o
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
+obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
 obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 71ed3ce29e12..d71a987fd2bf 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -41,6 +41,7 @@
 #include <linux/sched.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/list.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 
@@ -62,6 +63,9 @@ static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);
 
 static int nr_slots[TYPE_MAX];
 
+/* Keep track of the breakpoints attached to tasks */
+static LIST_HEAD(bp_task_head);
+
 static int constraints_initialized;
 
 /* Gather the number of total pinned and un-pinned bp in a cpuset */
@@ -103,33 +107,21 @@ static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
 	return 0;
 }
 
-static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
+/*
+ * Count the number of breakpoints of the same type and same task.
+ * The given event must be not on the list.
+ */
+static int task_bp_pinned(struct perf_event *bp, enum bp_type_idx type)
 {
-	struct perf_event_context *ctx = tsk->perf_event_ctxp;
-	struct list_head *list;
-	struct perf_event *bp;
-	unsigned long flags;
+	struct perf_event_context *ctx = bp->ctx;
+	struct perf_event *iter;
 	int count = 0;
 
-	if (WARN_ONCE(!ctx, "No perf context for this task"))
-		return 0;
-
-	list = &ctx->event_list;
-
-	raw_spin_lock_irqsave(&ctx->lock, flags);
-
-	/*
-	 * The current breakpoint counter is not included in the list
-	 * at the open() callback time
-	 */
-	list_for_each_entry(bp, list, event_entry) {
-		if (bp->attr.type == PERF_TYPE_BREAKPOINT)
-			if (find_slot_idx(bp) == type)
-				count += hw_breakpoint_weight(bp);
+	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
+		if (iter->ctx == ctx && find_slot_idx(iter) == type)
+			count += hw_breakpoint_weight(iter);
 	}
 
-	raw_spin_unlock_irqrestore(&ctx->lock, flags);
-
 	return count;
 }
 
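With this change task_bp_pinned() no longer takes the task's perf context lock and walks ctx->event_list; it walks the new global bp_task_head list and matches entries against the event's context. A minimal user-space sketch of that counting scheme (the struct and field names below are illustrative stand-ins, not kernel code):

    #include <stdio.h>

    /* Illustrative stand-ins for perf_event and bp_task_head (not kernel code). */
    struct bp {
        int ctx_id;         /* models bp->ctx: which task owns the breakpoint */
        int type;           /* models find_slot_idx(): data vs. instruction   */
        int weight;         /* models hw_breakpoint_weight()                  */
        struct bp *next;
    };

    /* Count breakpoints of the same type belonging to the same context,
     * excluding the event being added (it is not on the list yet). */
    static int task_bp_pinned(const struct bp *list, int ctx_id, int type)
    {
        int count = 0;

        for (const struct bp *it = list; it; it = it->next)
            if (it->ctx_id == ctx_id && it->type == type)
                count += it->weight;
        return count;
    }

    int main(void)
    {
        struct bp b3 = { .ctx_id = 2, .type = 0, .weight = 1, .next = NULL };
        struct bp b2 = { .ctx_id = 1, .type = 0, .weight = 2, .next = &b3 };
        struct bp b1 = { .ctx_id = 1, .type = 0, .weight = 1, .next = &b2 };

        printf("%d\n", task_bp_pinned(&b1, 1, 0));  /* 3: weights 1 + 2 */
        return 0;
    }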
@@ -149,7 +141,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			slots->pinned += max_task_bp_pinned(cpu, type);
 		else
-			slots->pinned += task_bp_pinned(tsk, type);
+			slots->pinned += task_bp_pinned(bp, type);
 		slots->flexible = per_cpu(nr_bp_flexible[type], cpu);
 
 		return;
@@ -162,7 +154,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
 		if (!tsk)
 			nr += max_task_bp_pinned(cpu, type);
 		else
-			nr += task_bp_pinned(tsk, type);
+			nr += task_bp_pinned(bp, type);
 
 		if (nr > slots->pinned)
 			slots->pinned = nr;
@@ -188,7 +180,7 @@ fetch_this_slot(struct bp_busy_slots *slots, int weight)
 /*
  * Add a pinned breakpoint for the given task in our constraint table
  */
-static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
+static void toggle_bp_task_slot(struct perf_event *bp, int cpu, bool enable,
 				enum bp_type_idx type, int weight)
 {
 	unsigned int *tsk_pinned;
@@ -196,10 +188,11 @@ static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
 	int old_idx = 0;
 	int idx = 0;
 
-	old_count = task_bp_pinned(tsk, type);
+	old_count = task_bp_pinned(bp, type);
 	old_idx = old_count - 1;
 	idx = old_idx + weight;
 
+	/* tsk_pinned[n] is the number of tasks having n breakpoints */
 	tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
 	if (enable) {
 		tsk_pinned[idx]++;
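The new comment pins down what nr_task_bp_pinned[] tracks: a per-cpu histogram of how many tasks hold a given number of breakpoint slots. A simplified model of the enable-path update shown above, assuming the same old_idx/idx arithmetic (bounds checks and the exact bucket offsets are glossed over):

    #include <stdio.h>

    #define MAX_BP 16

    /* Simplified per-cpu histogram: tsk_pinned[n] ~ number of tasks in bucket n. */
    static unsigned int tsk_pinned[MAX_BP];

    static void account_enable(int old_count, int weight)
    {
        int old_idx = old_count - 1;    /* bucket the task used to occupy  */
        int idx = old_idx + weight;     /* bucket it moves to after adding */

        tsk_pinned[idx]++;
        if (old_count > 0)
            tsk_pinned[old_idx]--;      /* leave the previous bucket */
    }

    int main(void)
    {
        tsk_pinned[0] = 1;              /* one task currently pinning one slot  */
        account_enable(1, 1);           /* it installs another slot of weight 1 */
        printf("%u %u\n", tsk_pinned[0], tsk_pinned[1]);    /* 0 1 */
        return 0;
    }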
@@ -222,23 +215,30 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
 	int cpu = bp->cpu;
 	struct task_struct *tsk = bp->ctx->task;
 
+	/* Pinned counter cpu profiling */
+	if (!tsk) {
+
+		if (enable)
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
+		else
+			per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+		return;
+	}
+
 	/* Pinned counter task profiling */
-	if (tsk) {
-		if (cpu >= 0) {
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-			return;
-		}
 
+	if (!enable)
+		list_del(&bp->hw.bp_list);
+
+	if (cpu >= 0) {
+		toggle_bp_task_slot(bp, cpu, enable, type, weight);
+	} else {
 		for_each_online_cpu(cpu)
-			toggle_bp_task_slot(tsk, cpu, enable, type, weight);
-		return;
+			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}
 
-	/* Pinned counter cpu profiling */
 	if (enable)
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
-	else
-		per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
+		list_add_tail(&bp->hw.bp_list, &bp_task_head);
 }
 
 /*
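Note the ordering in the rewritten toggle_bp_slot(): on disable the event is unlinked from bp_task_head before the per-cpu recount, and on enable it is appended only after the accounting, so task_bp_pinned() never sees the event it is being called for. A deliberately tiny sketch of that invariant (hypothetical helpers, not the kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical helpers modelling only the ordering, not the real API. */
    struct event { bool on_list; };

    static int  recount_excluding(struct event *e) { return e->on_list ? -1 : 0; }
    static void list_del_event(struct event *e)    { e->on_list = false; }
    static void list_add_event(struct event *e)    { e->on_list = true; }

    static void toggle_slot(struct event *e, bool enable)
    {
        if (!enable)
            list_del_event(e);          /* unlink first, then recount */

        if (recount_excluding(e) < 0)
            printf("bug: event counted itself\n");

        if (enable)
            list_add_event(e);          /* publish only after accounting */
    }

    int main(void)
    {
        struct event e = { .on_list = false };

        toggle_slot(&e, true);
        toggle_slot(&e, false);
        printf("on_list=%d\n", e.on_list);  /* 0 */
        return 0;
    }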
@@ -312,6 +312,10 @@ static int __reserve_bp_slot(struct perf_event *bp)
 	weight = hw_breakpoint_weight(bp);
 
 	fetch_bp_busy_slots(&slots, bp, type);
+	/*
+	 * Simulate the addition of this breakpoint to the constraints
+	 * and see the result.
+	 */
 	fetch_this_slot(&slots, weight);
 
 	/* Flexible counters need to keep at least one slot */
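__reserve_bp_slot() thus reads: measure current usage with fetch_bp_busy_slots(), simulate adding this event's weight with fetch_this_slot(), then check the result. The exact bound is outside this hunk; the sketch below assumes the flexible pool keeps one slot, as the comment says:

    #include <stdio.h>

    struct busy_slots { int pinned; int flexible; };

    /* Simulate adding `weight`, then check it still fits in nr_slots
     * (assumed bound: the flexible pool keeps at least one slot). */
    static int reserve_slot(struct busy_slots slots, int weight, int nr_slots)
    {
        slots.pinned += weight;                 /* fetch_this_slot() */

        if (slots.pinned + (slots.flexible ? 1 : 0) > nr_slots)
            return -1;                          /* would overcommit */
        return 0;
    }

    int main(void)
    {
        struct busy_slots busy = { .pinned = 3, .flexible = 1 };

        printf("%d\n", reserve_slot(busy, 1, 4));   /* -1: no room left */
        printf("%d\n", reserve_slot(busy, 1, 6));   /*  0: fits         */
        return 0;
    }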
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ff86c558af4c..c772a3d4000d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event, | |||
675 | struct perf_event *event, *partial_group = NULL; | 675 | struct perf_event *event, *partial_group = NULL; |
676 | const struct pmu *pmu = group_event->pmu; | 676 | const struct pmu *pmu = group_event->pmu; |
677 | bool txn = false; | 677 | bool txn = false; |
678 | int ret; | ||
679 | 678 | ||
680 | if (group_event->state == PERF_EVENT_STATE_OFF) | 679 | if (group_event->state == PERF_EVENT_STATE_OFF) |
681 | return 0; | 680 | return 0; |
@@ -703,14 +702,8 @@ group_sched_in(struct perf_event *group_event, | |||
703 | } | 702 | } |
704 | } | 703 | } |
705 | 704 | ||
706 | if (!txn) | 705 | if (!txn || !pmu->commit_txn(pmu)) |
707 | return 0; | ||
708 | |||
709 | ret = pmu->commit_txn(pmu); | ||
710 | if (!ret) { | ||
711 | pmu->cancel_txn(pmu); | ||
712 | return 0; | 706 | return 0; |
713 | } | ||
714 | 707 | ||
715 | group_error: | 708 | group_error: |
716 | /* | 709 | /* |
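group_sched_in() now treats a zero return from pmu->commit_txn() as success and only falls through to the error unwind when the commit fails. A toy model of that start/add/commit transaction pattern (made-up pmu_model type, not the kernel's struct pmu):

    #include <stdio.h>

    /* Made-up PMU model: start a transaction, add events, then commit;
     * a non-zero commit return means the whole group does not fit. */
    struct pmu_model { int free_counters; int pending; };

    static void start_txn(struct pmu_model *p) { p->pending = 0; }
    static void add_event(struct pmu_model *p) { p->pending++; }

    static int commit_txn(struct pmu_model *p)
    {
        if (p->pending > p->free_counters)
            return -1;
        p->free_counters -= p->pending;
        return 0;
    }

    static int group_sched_in(struct pmu_model *p, int nr_events)
    {
        start_txn(p);
        for (int i = 0; i < nr_events; i++)
            add_event(p);

        if (!commit_txn(p))
            return 0;           /* committed: nothing to undo */

        /* group_error: the caller unwinds the events it already added */
        return -1;
    }

    int main(void)
    {
        struct pmu_model pmu = { .free_counters = 4 };

        printf("%d\n", group_sched_in(&pmu, 3));    /*  0 */
        printf("%d\n", group_sched_in(&pmu, 3));    /* -1 */
        return 0;
    }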
@@ -1155,9 +1148,9 @@ static void __perf_event_sync_stat(struct perf_event *event, | |||
1155 | * In order to keep per-task stats reliable we need to flip the event | 1148 | * In order to keep per-task stats reliable we need to flip the event |
1156 | * values when we flip the contexts. | 1149 | * values when we flip the contexts. |
1157 | */ | 1150 | */ |
1158 | value = atomic64_read(&next_event->count); | 1151 | value = local64_read(&next_event->count); |
1159 | value = atomic64_xchg(&event->count, value); | 1152 | value = local64_xchg(&event->count, value); |
1160 | atomic64_set(&next_event->count, value); | 1153 | local64_set(&next_event->count, value); |
1161 | 1154 | ||
1162 | swap(event->total_time_enabled, next_event->total_time_enabled); | 1155 | swap(event->total_time_enabled, next_event->total_time_enabled); |
1163 | swap(event->total_time_running, next_event->total_time_running); | 1156 | swap(event->total_time_running, next_event->total_time_running); |
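Several counters switch from atomic64_t to local64_t in this file; local64_t only has to be safe against interrupts on the CPU that owns the value, which makes it cheaper than a fully atomic 64-bit counter. The value flip above, modeled in user space with a C11 atomic exchange standing in for local64_xchg():

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
        atomic_llong event_count = 100;     /* models event->count      */
        atomic_llong next_count = 250;      /* models next_event->count */
        long long value;

        value = atomic_load(&next_count);
        value = atomic_exchange(&event_count, value);
        atomic_store(&next_count, value);

        /* The two events have swapped their counts: 250 100 */
        printf("%lld %lld\n", (long long)atomic_load(&event_count),
               (long long)atomic_load(&next_count));
        return 0;
    }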
@@ -1547,10 +1540,10 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) | |||
1547 | 1540 | ||
1548 | hwc->sample_period = sample_period; | 1541 | hwc->sample_period = sample_period; |
1549 | 1542 | ||
1550 | if (atomic64_read(&hwc->period_left) > 8*sample_period) { | 1543 | if (local64_read(&hwc->period_left) > 8*sample_period) { |
1551 | perf_disable(); | 1544 | perf_disable(); |
1552 | perf_event_stop(event); | 1545 | perf_event_stop(event); |
1553 | atomic64_set(&hwc->period_left, 0); | 1546 | local64_set(&hwc->period_left, 0); |
1554 | perf_event_start(event); | 1547 | perf_event_start(event); |
1555 | perf_enable(); | 1548 | perf_enable(); |
1556 | } | 1549 | } |
@@ -1591,7 +1584,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx) | |||
1591 | 1584 | ||
1592 | perf_disable(); | 1585 | perf_disable(); |
1593 | event->pmu->read(event); | 1586 | event->pmu->read(event); |
1594 | now = atomic64_read(&event->count); | 1587 | now = local64_read(&event->count); |
1595 | delta = now - hwc->freq_count_stamp; | 1588 | delta = now - hwc->freq_count_stamp; |
1596 | hwc->freq_count_stamp = now; | 1589 | hwc->freq_count_stamp = now; |
1597 | 1590 | ||
@@ -1743,6 +1736,11 @@ static void __perf_event_read(void *info) | |||
1743 | event->pmu->read(event); | 1736 | event->pmu->read(event); |
1744 | } | 1737 | } |
1745 | 1738 | ||
1739 | static inline u64 perf_event_count(struct perf_event *event) | ||
1740 | { | ||
1741 | return local64_read(&event->count) + atomic64_read(&event->child_count); | ||
1742 | } | ||
1743 | |||
1746 | static u64 perf_event_read(struct perf_event *event) | 1744 | static u64 perf_event_read(struct perf_event *event) |
1747 | { | 1745 | { |
1748 | /* | 1746 | /* |
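The new perf_event_count() helper spells out what readers now see: the event's own local64 count plus the total accumulated from its child events. A stand-alone model:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for perf_event_count(): the event's own count plus the
     * total accumulated by its child events. */
    struct event_model {
        int64_t count;          /* a local64_t in the kernel          */
        int64_t child_count;    /* an atomic64_t fed by child events  */
    };

    static uint64_t perf_event_count(const struct event_model *e)
    {
        return (uint64_t)(e->count + e->child_count);
    }

    int main(void)
    {
        struct event_model e = { .count = 1200, .child_count = 300 };

        printf("%llu\n", (unsigned long long)perf_event_count(&e)); /* 1500 */
        return 0;
    }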
@@ -1762,7 +1760,7 @@ static u64 perf_event_read(struct perf_event *event) | |||
1762 | raw_spin_unlock_irqrestore(&ctx->lock, flags); | 1760 | raw_spin_unlock_irqrestore(&ctx->lock, flags); |
1763 | } | 1761 | } |
1764 | 1762 | ||
1765 | return atomic64_read(&event->count); | 1763 | return perf_event_count(event); |
1766 | } | 1764 | } |
1767 | 1765 | ||
1768 | /* | 1766 | /* |
@@ -1883,7 +1881,7 @@ static void free_event_rcu(struct rcu_head *head) | |||
1883 | } | 1881 | } |
1884 | 1882 | ||
1885 | static void perf_pending_sync(struct perf_event *event); | 1883 | static void perf_pending_sync(struct perf_event *event); |
1886 | static void perf_mmap_data_put(struct perf_mmap_data *data); | 1884 | static void perf_buffer_put(struct perf_buffer *buffer); |
1887 | 1885 | ||
1888 | static void free_event(struct perf_event *event) | 1886 | static void free_event(struct perf_event *event) |
1889 | { | 1887 | { |
@@ -1891,7 +1889,7 @@ static void free_event(struct perf_event *event) | |||
1891 | 1889 | ||
1892 | if (!event->parent) { | 1890 | if (!event->parent) { |
1893 | atomic_dec(&nr_events); | 1891 | atomic_dec(&nr_events); |
1894 | if (event->attr.mmap) | 1892 | if (event->attr.mmap || event->attr.mmap_data) |
1895 | atomic_dec(&nr_mmap_events); | 1893 | atomic_dec(&nr_mmap_events); |
1896 | if (event->attr.comm) | 1894 | if (event->attr.comm) |
1897 | atomic_dec(&nr_comm_events); | 1895 | atomic_dec(&nr_comm_events); |
@@ -1899,9 +1897,9 @@ static void free_event(struct perf_event *event) | |||
1899 | atomic_dec(&nr_task_events); | 1897 | atomic_dec(&nr_task_events); |
1900 | } | 1898 | } |
1901 | 1899 | ||
1902 | if (event->data) { | 1900 | if (event->buffer) { |
1903 | perf_mmap_data_put(event->data); | 1901 | perf_buffer_put(event->buffer); |
1904 | event->data = NULL; | 1902 | event->buffer = NULL; |
1905 | } | 1903 | } |
1906 | 1904 | ||
1907 | if (event->destroy) | 1905 | if (event->destroy) |
@@ -2126,13 +2124,13 @@ perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | |||
2126 | static unsigned int perf_poll(struct file *file, poll_table *wait) | 2124 | static unsigned int perf_poll(struct file *file, poll_table *wait) |
2127 | { | 2125 | { |
2128 | struct perf_event *event = file->private_data; | 2126 | struct perf_event *event = file->private_data; |
2129 | struct perf_mmap_data *data; | 2127 | struct perf_buffer *buffer; |
2130 | unsigned int events = POLL_HUP; | 2128 | unsigned int events = POLL_HUP; |
2131 | 2129 | ||
2132 | rcu_read_lock(); | 2130 | rcu_read_lock(); |
2133 | data = rcu_dereference(event->data); | 2131 | buffer = rcu_dereference(event->buffer); |
2134 | if (data) | 2132 | if (buffer) |
2135 | events = atomic_xchg(&data->poll, 0); | 2133 | events = atomic_xchg(&buffer->poll, 0); |
2136 | rcu_read_unlock(); | 2134 | rcu_read_unlock(); |
2137 | 2135 | ||
2138 | poll_wait(file, &event->waitq, wait); | 2136 | poll_wait(file, &event->waitq, wait); |
@@ -2143,7 +2141,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait) | |||
2143 | static void perf_event_reset(struct perf_event *event) | 2141 | static void perf_event_reset(struct perf_event *event) |
2144 | { | 2142 | { |
2145 | (void)perf_event_read(event); | 2143 | (void)perf_event_read(event); |
2146 | atomic64_set(&event->count, 0); | 2144 | local64_set(&event->count, 0); |
2147 | perf_event_update_userpage(event); | 2145 | perf_event_update_userpage(event); |
2148 | } | 2146 | } |
2149 | 2147 | ||
@@ -2342,14 +2340,14 @@ static int perf_event_index(struct perf_event *event) | |||
2342 | void perf_event_update_userpage(struct perf_event *event) | 2340 | void perf_event_update_userpage(struct perf_event *event) |
2343 | { | 2341 | { |
2344 | struct perf_event_mmap_page *userpg; | 2342 | struct perf_event_mmap_page *userpg; |
2345 | struct perf_mmap_data *data; | 2343 | struct perf_buffer *buffer; |
2346 | 2344 | ||
2347 | rcu_read_lock(); | 2345 | rcu_read_lock(); |
2348 | data = rcu_dereference(event->data); | 2346 | buffer = rcu_dereference(event->buffer); |
2349 | if (!data) | 2347 | if (!buffer) |
2350 | goto unlock; | 2348 | goto unlock; |
2351 | 2349 | ||
2352 | userpg = data->user_page; | 2350 | userpg = buffer->user_page; |
2353 | 2351 | ||
2354 | /* | 2352 | /* |
2355 | * Disable preemption so as to not let the corresponding user-space | 2353 | * Disable preemption so as to not let the corresponding user-space |
@@ -2359,9 +2357,9 @@ void perf_event_update_userpage(struct perf_event *event) | |||
2359 | ++userpg->lock; | 2357 | ++userpg->lock; |
2360 | barrier(); | 2358 | barrier(); |
2361 | userpg->index = perf_event_index(event); | 2359 | userpg->index = perf_event_index(event); |
2362 | userpg->offset = atomic64_read(&event->count); | 2360 | userpg->offset = perf_event_count(event); |
2363 | if (event->state == PERF_EVENT_STATE_ACTIVE) | 2361 | if (event->state == PERF_EVENT_STATE_ACTIVE) |
2364 | userpg->offset -= atomic64_read(&event->hw.prev_count); | 2362 | userpg->offset -= local64_read(&event->hw.prev_count); |
2365 | 2363 | ||
2366 | userpg->time_enabled = event->total_time_enabled + | 2364 | userpg->time_enabled = event->total_time_enabled + |
2367 | atomic64_read(&event->child_total_time_enabled); | 2365 | atomic64_read(&event->child_total_time_enabled); |
@@ -2376,6 +2374,25 @@ unlock: | |||
2376 | rcu_read_unlock(); | 2374 | rcu_read_unlock(); |
2377 | } | 2375 | } |
2378 | 2376 | ||
2377 | static unsigned long perf_data_size(struct perf_buffer *buffer); | ||
2378 | |||
2379 | static void | ||
2380 | perf_buffer_init(struct perf_buffer *buffer, long watermark, int flags) | ||
2381 | { | ||
2382 | long max_size = perf_data_size(buffer); | ||
2383 | |||
2384 | if (watermark) | ||
2385 | buffer->watermark = min(max_size, watermark); | ||
2386 | |||
2387 | if (!buffer->watermark) | ||
2388 | buffer->watermark = max_size / 2; | ||
2389 | |||
2390 | if (flags & PERF_BUFFER_WRITABLE) | ||
2391 | buffer->writable = 1; | ||
2392 | |||
2393 | atomic_set(&buffer->refcount, 1); | ||
2394 | } | ||
2395 | |||
2379 | #ifndef CONFIG_PERF_USE_VMALLOC | 2396 | #ifndef CONFIG_PERF_USE_VMALLOC |
2380 | 2397 | ||
2381 | /* | 2398 | /* |
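perf_buffer_init() gathers the setup that used to live in perf_mmap_data_init(): clamp a caller-supplied wakeup watermark to the buffer size, default to half the buffer otherwise, and record the writable flag. The same defaulting rule, sketched in isolation (the flag value below is illustrative):

    #include <stdio.h>

    #define PERF_BUFFER_WRITABLE 0x01   /* illustrative flag value */

    struct buffer_model {
        long size;          /* total data size in bytes        */
        long watermark;     /* wake the reader after this many */
        int  writable;
    };

    /* Same defaulting rule as perf_buffer_init(), simplified. */
    static void buffer_init(struct buffer_model *b, long watermark, int flags)
    {
        if (watermark)
            b->watermark = watermark < b->size ? watermark : b->size;
        if (!b->watermark)
            b->watermark = b->size / 2;
        if (flags & PERF_BUFFER_WRITABLE)
            b->writable = 1;
    }

    int main(void)
    {
        struct buffer_model b = { .size = 8 * 4096 };

        buffer_init(&b, 0, PERF_BUFFER_WRITABLE);
        printf("%ld %d\n", b.watermark, b.writable);    /* 16384 1 */
        return 0;
    }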
@@ -2383,15 +2400,15 @@ unlock: | |||
2383 | */ | 2400 | */ |
2384 | 2401 | ||
2385 | static struct page * | 2402 | static struct page * |
2386 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | 2403 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) |
2387 | { | 2404 | { |
2388 | if (pgoff > data->nr_pages) | 2405 | if (pgoff > buffer->nr_pages) |
2389 | return NULL; | 2406 | return NULL; |
2390 | 2407 | ||
2391 | if (pgoff == 0) | 2408 | if (pgoff == 0) |
2392 | return virt_to_page(data->user_page); | 2409 | return virt_to_page(buffer->user_page); |
2393 | 2410 | ||
2394 | return virt_to_page(data->data_pages[pgoff - 1]); | 2411 | return virt_to_page(buffer->data_pages[pgoff - 1]); |
2395 | } | 2412 | } |
2396 | 2413 | ||
2397 | static void *perf_mmap_alloc_page(int cpu) | 2414 | static void *perf_mmap_alloc_page(int cpu) |
@@ -2407,42 +2424,44 @@ static void *perf_mmap_alloc_page(int cpu) | |||
2407 | return page_address(page); | 2424 | return page_address(page); |
2408 | } | 2425 | } |
2409 | 2426 | ||
2410 | static struct perf_mmap_data * | 2427 | static struct perf_buffer * |
2411 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | 2428 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) |
2412 | { | 2429 | { |
2413 | struct perf_mmap_data *data; | 2430 | struct perf_buffer *buffer; |
2414 | unsigned long size; | 2431 | unsigned long size; |
2415 | int i; | 2432 | int i; |
2416 | 2433 | ||
2417 | size = sizeof(struct perf_mmap_data); | 2434 | size = sizeof(struct perf_buffer); |
2418 | size += nr_pages * sizeof(void *); | 2435 | size += nr_pages * sizeof(void *); |
2419 | 2436 | ||
2420 | data = kzalloc(size, GFP_KERNEL); | 2437 | buffer = kzalloc(size, GFP_KERNEL); |
2421 | if (!data) | 2438 | if (!buffer) |
2422 | goto fail; | 2439 | goto fail; |
2423 | 2440 | ||
2424 | data->user_page = perf_mmap_alloc_page(event->cpu); | 2441 | buffer->user_page = perf_mmap_alloc_page(cpu); |
2425 | if (!data->user_page) | 2442 | if (!buffer->user_page) |
2426 | goto fail_user_page; | 2443 | goto fail_user_page; |
2427 | 2444 | ||
2428 | for (i = 0; i < nr_pages; i++) { | 2445 | for (i = 0; i < nr_pages; i++) { |
2429 | data->data_pages[i] = perf_mmap_alloc_page(event->cpu); | 2446 | buffer->data_pages[i] = perf_mmap_alloc_page(cpu); |
2430 | if (!data->data_pages[i]) | 2447 | if (!buffer->data_pages[i]) |
2431 | goto fail_data_pages; | 2448 | goto fail_data_pages; |
2432 | } | 2449 | } |
2433 | 2450 | ||
2434 | data->nr_pages = nr_pages; | 2451 | buffer->nr_pages = nr_pages; |
2452 | |||
2453 | perf_buffer_init(buffer, watermark, flags); | ||
2435 | 2454 | ||
2436 | return data; | 2455 | return buffer; |
2437 | 2456 | ||
2438 | fail_data_pages: | 2457 | fail_data_pages: |
2439 | for (i--; i >= 0; i--) | 2458 | for (i--; i >= 0; i--) |
2440 | free_page((unsigned long)data->data_pages[i]); | 2459 | free_page((unsigned long)buffer->data_pages[i]); |
2441 | 2460 | ||
2442 | free_page((unsigned long)data->user_page); | 2461 | free_page((unsigned long)buffer->user_page); |
2443 | 2462 | ||
2444 | fail_user_page: | 2463 | fail_user_page: |
2445 | kfree(data); | 2464 | kfree(buffer); |
2446 | 2465 | ||
2447 | fail: | 2466 | fail: |
2448 | return NULL; | 2467 | return NULL; |
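perf_buffer_alloc() keeps the usual goto-based unwind: each allocation that can fail jumps to a label that releases everything allocated so far, in reverse order. The same shape applied to a toy structure (nothing below is a kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    /* Same unwind-on-failure shape as perf_buffer_alloc(), on a toy struct. */
    struct toy_buffer {
        void *user_page;
        void *data_pages[4];
        int nr_pages;
    };

    static struct toy_buffer *toy_buffer_alloc(int nr_pages)
    {
        struct toy_buffer *buf;
        int i;

        buf = calloc(1, sizeof(*buf));
        if (!buf)
            goto fail;

        buf->user_page = malloc(4096);
        if (!buf->user_page)
            goto fail_user_page;

        for (i = 0; i < nr_pages; i++) {
            buf->data_pages[i] = malloc(4096);
            if (!buf->data_pages[i])
                goto fail_data_pages;
        }
        buf->nr_pages = nr_pages;

        return buf;

    fail_data_pages:
        for (i--; i >= 0; i--)          /* free in reverse order */
            free(buf->data_pages[i]);
        free(buf->user_page);
    fail_user_page:
        free(buf);
    fail:
        return NULL;
    }

    int main(void)
    {
        struct toy_buffer *buf = toy_buffer_alloc(2);

        printf("%s\n", buf ? "allocated" : "out of memory");
        return 0;
    }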
@@ -2456,17 +2475,17 @@ static void perf_mmap_free_page(unsigned long addr) | |||
2456 | __free_page(page); | 2475 | __free_page(page); |
2457 | } | 2476 | } |
2458 | 2477 | ||
2459 | static void perf_mmap_data_free(struct perf_mmap_data *data) | 2478 | static void perf_buffer_free(struct perf_buffer *buffer) |
2460 | { | 2479 | { |
2461 | int i; | 2480 | int i; |
2462 | 2481 | ||
2463 | perf_mmap_free_page((unsigned long)data->user_page); | 2482 | perf_mmap_free_page((unsigned long)buffer->user_page); |
2464 | for (i = 0; i < data->nr_pages; i++) | 2483 | for (i = 0; i < buffer->nr_pages; i++) |
2465 | perf_mmap_free_page((unsigned long)data->data_pages[i]); | 2484 | perf_mmap_free_page((unsigned long)buffer->data_pages[i]); |
2466 | kfree(data); | 2485 | kfree(buffer); |
2467 | } | 2486 | } |
2468 | 2487 | ||
2469 | static inline int page_order(struct perf_mmap_data *data) | 2488 | static inline int page_order(struct perf_buffer *buffer) |
2470 | { | 2489 | { |
2471 | return 0; | 2490 | return 0; |
2472 | } | 2491 | } |
@@ -2479,18 +2498,18 @@ static inline int page_order(struct perf_mmap_data *data) | |||
2479 | * Required for architectures that have d-cache aliasing issues. | 2498 | * Required for architectures that have d-cache aliasing issues. |
2480 | */ | 2499 | */ |
2481 | 2500 | ||
2482 | static inline int page_order(struct perf_mmap_data *data) | 2501 | static inline int page_order(struct perf_buffer *buffer) |
2483 | { | 2502 | { |
2484 | return data->page_order; | 2503 | return buffer->page_order; |
2485 | } | 2504 | } |
2486 | 2505 | ||
2487 | static struct page * | 2506 | static struct page * |
2488 | perf_mmap_to_page(struct perf_mmap_data *data, unsigned long pgoff) | 2507 | perf_mmap_to_page(struct perf_buffer *buffer, unsigned long pgoff) |
2489 | { | 2508 | { |
2490 | if (pgoff > (1UL << page_order(data))) | 2509 | if (pgoff > (1UL << page_order(buffer))) |
2491 | return NULL; | 2510 | return NULL; |
2492 | 2511 | ||
2493 | return vmalloc_to_page((void *)data->user_page + pgoff * PAGE_SIZE); | 2512 | return vmalloc_to_page((void *)buffer->user_page + pgoff * PAGE_SIZE); |
2494 | } | 2513 | } |
2495 | 2514 | ||
2496 | static void perf_mmap_unmark_page(void *addr) | 2515 | static void perf_mmap_unmark_page(void *addr) |
@@ -2500,57 +2519,59 @@ static void perf_mmap_unmark_page(void *addr) | |||
2500 | page->mapping = NULL; | 2519 | page->mapping = NULL; |
2501 | } | 2520 | } |
2502 | 2521 | ||
2503 | static void perf_mmap_data_free_work(struct work_struct *work) | 2522 | static void perf_buffer_free_work(struct work_struct *work) |
2504 | { | 2523 | { |
2505 | struct perf_mmap_data *data; | 2524 | struct perf_buffer *buffer; |
2506 | void *base; | 2525 | void *base; |
2507 | int i, nr; | 2526 | int i, nr; |
2508 | 2527 | ||
2509 | data = container_of(work, struct perf_mmap_data, work); | 2528 | buffer = container_of(work, struct perf_buffer, work); |
2510 | nr = 1 << page_order(data); | 2529 | nr = 1 << page_order(buffer); |
2511 | 2530 | ||
2512 | base = data->user_page; | 2531 | base = buffer->user_page; |
2513 | for (i = 0; i < nr + 1; i++) | 2532 | for (i = 0; i < nr + 1; i++) |
2514 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); | 2533 | perf_mmap_unmark_page(base + (i * PAGE_SIZE)); |
2515 | 2534 | ||
2516 | vfree(base); | 2535 | vfree(base); |
2517 | kfree(data); | 2536 | kfree(buffer); |
2518 | } | 2537 | } |
2519 | 2538 | ||
2520 | static void perf_mmap_data_free(struct perf_mmap_data *data) | 2539 | static void perf_buffer_free(struct perf_buffer *buffer) |
2521 | { | 2540 | { |
2522 | schedule_work(&data->work); | 2541 | schedule_work(&buffer->work); |
2523 | } | 2542 | } |
2524 | 2543 | ||
2525 | static struct perf_mmap_data * | 2544 | static struct perf_buffer * |
2526 | perf_mmap_data_alloc(struct perf_event *event, int nr_pages) | 2545 | perf_buffer_alloc(int nr_pages, long watermark, int cpu, int flags) |
2527 | { | 2546 | { |
2528 | struct perf_mmap_data *data; | 2547 | struct perf_buffer *buffer; |
2529 | unsigned long size; | 2548 | unsigned long size; |
2530 | void *all_buf; | 2549 | void *all_buf; |
2531 | 2550 | ||
2532 | size = sizeof(struct perf_mmap_data); | 2551 | size = sizeof(struct perf_buffer); |
2533 | size += sizeof(void *); | 2552 | size += sizeof(void *); |
2534 | 2553 | ||
2535 | data = kzalloc(size, GFP_KERNEL); | 2554 | buffer = kzalloc(size, GFP_KERNEL); |
2536 | if (!data) | 2555 | if (!buffer) |
2537 | goto fail; | 2556 | goto fail; |
2538 | 2557 | ||
2539 | INIT_WORK(&data->work, perf_mmap_data_free_work); | 2558 | INIT_WORK(&buffer->work, perf_buffer_free_work); |
2540 | 2559 | ||
2541 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); | 2560 | all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE); |
2542 | if (!all_buf) | 2561 | if (!all_buf) |
2543 | goto fail_all_buf; | 2562 | goto fail_all_buf; |
2544 | 2563 | ||
2545 | data->user_page = all_buf; | 2564 | buffer->user_page = all_buf; |
2546 | data->data_pages[0] = all_buf + PAGE_SIZE; | 2565 | buffer->data_pages[0] = all_buf + PAGE_SIZE; |
2547 | data->page_order = ilog2(nr_pages); | 2566 | buffer->page_order = ilog2(nr_pages); |
2548 | data->nr_pages = 1; | 2567 | buffer->nr_pages = 1; |
2568 | |||
2569 | perf_buffer_init(buffer, watermark, flags); | ||
2549 | 2570 | ||
2550 | return data; | 2571 | return buffer; |
2551 | 2572 | ||
2552 | fail_all_buf: | 2573 | fail_all_buf: |
2553 | kfree(data); | 2574 | kfree(buffer); |
2554 | 2575 | ||
2555 | fail: | 2576 | fail: |
2556 | return NULL; | 2577 | return NULL; |
@@ -2558,15 +2579,15 @@ fail: | |||
2558 | 2579 | ||
2559 | #endif | 2580 | #endif |
2560 | 2581 | ||
2561 | static unsigned long perf_data_size(struct perf_mmap_data *data) | 2582 | static unsigned long perf_data_size(struct perf_buffer *buffer) |
2562 | { | 2583 | { |
2563 | return data->nr_pages << (PAGE_SHIFT + page_order(data)); | 2584 | return buffer->nr_pages << (PAGE_SHIFT + page_order(buffer)); |
2564 | } | 2585 | } |
2565 | 2586 | ||
2566 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | 2587 | static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) |
2567 | { | 2588 | { |
2568 | struct perf_event *event = vma->vm_file->private_data; | 2589 | struct perf_event *event = vma->vm_file->private_data; |
2569 | struct perf_mmap_data *data; | 2590 | struct perf_buffer *buffer; |
2570 | int ret = VM_FAULT_SIGBUS; | 2591 | int ret = VM_FAULT_SIGBUS; |
2571 | 2592 | ||
2572 | if (vmf->flags & FAULT_FLAG_MKWRITE) { | 2593 | if (vmf->flags & FAULT_FLAG_MKWRITE) { |
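perf_data_size() is nr_pages shifted by PAGE_SHIFT plus the buffer's page order, which lets the page-backed and vmalloc-backed buffers report the same size. A worked example assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* 4 KiB pages */

    /* perf_data_size(): nr_pages << (PAGE_SHIFT + page_order) */
    static unsigned long data_size(unsigned long nr_pages, int page_order)
    {
        return nr_pages << (PAGE_SHIFT + page_order);
    }

    int main(void)
    {
        /* Page-backed buffer: 8 separate data pages, order 0. */
        printf("%lu\n", data_size(8, 0));   /* 32768 */

        /* vmalloc-backed buffer: nr_pages stays 1, page_order = ilog2(8). */
        printf("%lu\n", data_size(1, 3));   /* 32768 */
        return 0;
    }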
@@ -2576,14 +2597,14 @@ static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
2576 | } | 2597 | } |
2577 | 2598 | ||
2578 | rcu_read_lock(); | 2599 | rcu_read_lock(); |
2579 | data = rcu_dereference(event->data); | 2600 | buffer = rcu_dereference(event->buffer); |
2580 | if (!data) | 2601 | if (!buffer) |
2581 | goto unlock; | 2602 | goto unlock; |
2582 | 2603 | ||
2583 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) | 2604 | if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE)) |
2584 | goto unlock; | 2605 | goto unlock; |
2585 | 2606 | ||
2586 | vmf->page = perf_mmap_to_page(data, vmf->pgoff); | 2607 | vmf->page = perf_mmap_to_page(buffer, vmf->pgoff); |
2587 | if (!vmf->page) | 2608 | if (!vmf->page) |
2588 | goto unlock; | 2609 | goto unlock; |
2589 | 2610 | ||
@@ -2598,52 +2619,35 @@ unlock: | |||
2598 | return ret; | 2619 | return ret; |
2599 | } | 2620 | } |
2600 | 2621 | ||
2601 | static void | 2622 | static void perf_buffer_free_rcu(struct rcu_head *rcu_head) |
2602 | perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) | ||
2603 | { | ||
2604 | long max_size = perf_data_size(data); | ||
2605 | |||
2606 | if (event->attr.watermark) { | ||
2607 | data->watermark = min_t(long, max_size, | ||
2608 | event->attr.wakeup_watermark); | ||
2609 | } | ||
2610 | |||
2611 | if (!data->watermark) | ||
2612 | data->watermark = max_size / 2; | ||
2613 | |||
2614 | atomic_set(&data->refcount, 1); | ||
2615 | rcu_assign_pointer(event->data, data); | ||
2616 | } | ||
2617 | |||
2618 | static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) | ||
2619 | { | 2623 | { |
2620 | struct perf_mmap_data *data; | 2624 | struct perf_buffer *buffer; |
2621 | 2625 | ||
2622 | data = container_of(rcu_head, struct perf_mmap_data, rcu_head); | 2626 | buffer = container_of(rcu_head, struct perf_buffer, rcu_head); |
2623 | perf_mmap_data_free(data); | 2627 | perf_buffer_free(buffer); |
2624 | } | 2628 | } |
2625 | 2629 | ||
2626 | static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event) | 2630 | static struct perf_buffer *perf_buffer_get(struct perf_event *event) |
2627 | { | 2631 | { |
2628 | struct perf_mmap_data *data; | 2632 | struct perf_buffer *buffer; |
2629 | 2633 | ||
2630 | rcu_read_lock(); | 2634 | rcu_read_lock(); |
2631 | data = rcu_dereference(event->data); | 2635 | buffer = rcu_dereference(event->buffer); |
2632 | if (data) { | 2636 | if (buffer) { |
2633 | if (!atomic_inc_not_zero(&data->refcount)) | 2637 | if (!atomic_inc_not_zero(&buffer->refcount)) |
2634 | data = NULL; | 2638 | buffer = NULL; |
2635 | } | 2639 | } |
2636 | rcu_read_unlock(); | 2640 | rcu_read_unlock(); |
2637 | 2641 | ||
2638 | return data; | 2642 | return buffer; |
2639 | } | 2643 | } |
2640 | 2644 | ||
2641 | static void perf_mmap_data_put(struct perf_mmap_data *data) | 2645 | static void perf_buffer_put(struct perf_buffer *buffer) |
2642 | { | 2646 | { |
2643 | if (!atomic_dec_and_test(&data->refcount)) | 2647 | if (!atomic_dec_and_test(&buffer->refcount)) |
2644 | return; | 2648 | return; |
2645 | 2649 | ||
2646 | call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); | 2650 | call_rcu(&buffer->rcu_head, perf_buffer_free_rcu); |
2647 | } | 2651 | } |
2648 | 2652 | ||
2649 | static void perf_mmap_open(struct vm_area_struct *vma) | 2653 | static void perf_mmap_open(struct vm_area_struct *vma) |
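perf_buffer_get()/perf_buffer_put() follow the lookup-then-refcount pattern: take a reference only if the count is still non-zero, and free on the final put. The kernel defers that free through call_rcu(), which this user-space sketch omits:

    #include <stdatomic.h>
    #include <stdio.h>

    struct buffer_ref { atomic_int refcount; };

    /* Take a reference only if the object is not already going away. */
    static int buffer_get(struct buffer_ref *b)
    {
        int old = atomic_load(&b->refcount);

        while (old != 0)
            if (atomic_compare_exchange_weak(&b->refcount, &old, old + 1))
                return 1;
        return 0;
    }

    static void buffer_put(struct buffer_ref *b)
    {
        if (atomic_fetch_sub(&b->refcount, 1) == 1)
            printf("last reference dropped: free the buffer\n");
    }

    int main(void)
    {
        struct buffer_ref b;

        atomic_init(&b.refcount, 1);        /* reference held by the mmap */
        if (buffer_get(&b))                 /* a reader takes a reference */
            buffer_put(&b);
        buffer_put(&b);                     /* mmap goes away: frees      */
        return 0;
    }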
@@ -2658,16 +2662,16 @@ static void perf_mmap_close(struct vm_area_struct *vma) | |||
2658 | struct perf_event *event = vma->vm_file->private_data; | 2662 | struct perf_event *event = vma->vm_file->private_data; |
2659 | 2663 | ||
2660 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { | 2664 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { |
2661 | unsigned long size = perf_data_size(event->data); | 2665 | unsigned long size = perf_data_size(event->buffer); |
2662 | struct user_struct *user = event->mmap_user; | 2666 | struct user_struct *user = event->mmap_user; |
2663 | struct perf_mmap_data *data = event->data; | 2667 | struct perf_buffer *buffer = event->buffer; |
2664 | 2668 | ||
2665 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); | 2669 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); |
2666 | vma->vm_mm->locked_vm -= event->mmap_locked; | 2670 | vma->vm_mm->locked_vm -= event->mmap_locked; |
2667 | rcu_assign_pointer(event->data, NULL); | 2671 | rcu_assign_pointer(event->buffer, NULL); |
2668 | mutex_unlock(&event->mmap_mutex); | 2672 | mutex_unlock(&event->mmap_mutex); |
2669 | 2673 | ||
2670 | perf_mmap_data_put(data); | 2674 | perf_buffer_put(buffer); |
2671 | free_uid(user); | 2675 | free_uid(user); |
2672 | } | 2676 | } |
2673 | } | 2677 | } |
@@ -2685,11 +2689,11 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2685 | unsigned long user_locked, user_lock_limit; | 2689 | unsigned long user_locked, user_lock_limit; |
2686 | struct user_struct *user = current_user(); | 2690 | struct user_struct *user = current_user(); |
2687 | unsigned long locked, lock_limit; | 2691 | unsigned long locked, lock_limit; |
2688 | struct perf_mmap_data *data; | 2692 | struct perf_buffer *buffer; |
2689 | unsigned long vma_size; | 2693 | unsigned long vma_size; |
2690 | unsigned long nr_pages; | 2694 | unsigned long nr_pages; |
2691 | long user_extra, extra; | 2695 | long user_extra, extra; |
2692 | int ret = 0; | 2696 | int ret = 0, flags = 0; |
2693 | 2697 | ||
2694 | /* | 2698 | /* |
2695 | * Don't allow mmap() of inherited per-task counters. This would | 2699 | * Don't allow mmap() of inherited per-task counters. This would |
@@ -2706,7 +2710,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2706 | nr_pages = (vma_size / PAGE_SIZE) - 1; | 2710 | nr_pages = (vma_size / PAGE_SIZE) - 1; |
2707 | 2711 | ||
2708 | /* | 2712 | /* |
2709 | * If we have data pages ensure they're a power-of-two number, so we | 2713 | * If we have buffer pages ensure they're a power-of-two number, so we |
2710 | * can do bitmasks instead of modulo. | 2714 | * can do bitmasks instead of modulo. |
2711 | */ | 2715 | */ |
2712 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) | 2716 | if (nr_pages != 0 && !is_power_of_2(nr_pages)) |
@@ -2720,9 +2724,9 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2720 | 2724 | ||
2721 | WARN_ON_ONCE(event->ctx->parent_ctx); | 2725 | WARN_ON_ONCE(event->ctx->parent_ctx); |
2722 | mutex_lock(&event->mmap_mutex); | 2726 | mutex_lock(&event->mmap_mutex); |
2723 | if (event->data) { | 2727 | if (event->buffer) { |
2724 | if (event->data->nr_pages == nr_pages) | 2728 | if (event->buffer->nr_pages == nr_pages) |
2725 | atomic_inc(&event->data->refcount); | 2729 | atomic_inc(&event->buffer->refcount); |
2726 | else | 2730 | else |
2727 | ret = -EINVAL; | 2731 | ret = -EINVAL; |
2728 | goto unlock; | 2732 | goto unlock; |
@@ -2752,17 +2756,18 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) | |||
2752 | goto unlock; | 2756 | goto unlock; |
2753 | } | 2757 | } |
2754 | 2758 | ||
2755 | WARN_ON(event->data); | 2759 | WARN_ON(event->buffer); |
2760 | |||
2761 | if (vma->vm_flags & VM_WRITE) | ||
2762 | flags |= PERF_BUFFER_WRITABLE; | ||
2756 | 2763 | ||
2757 | data = perf_mmap_data_alloc(event, nr_pages); | 2764 | buffer = perf_buffer_alloc(nr_pages, event->attr.wakeup_watermark, |
2758 | if (!data) { | 2765 | event->cpu, flags); |
2766 | if (!buffer) { | ||
2759 | ret = -ENOMEM; | 2767 | ret = -ENOMEM; |
2760 | goto unlock; | 2768 | goto unlock; |
2761 | } | 2769 | } |
2762 | 2770 | rcu_assign_pointer(event->buffer, buffer); | |
2763 | perf_mmap_data_init(event, data); | ||
2764 | if (vma->vm_flags & VM_WRITE) | ||
2765 | event->data->writable = 1; | ||
2766 | 2771 | ||
2767 | atomic_long_add(user_extra, &user->locked_vm); | 2772 | atomic_long_add(user_extra, &user->locked_vm); |
2768 | event->mmap_locked = extra; | 2773 | event->mmap_locked = extra; |
@@ -2941,11 +2946,6 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | |||
2941 | return NULL; | 2946 | return NULL; |
2942 | } | 2947 | } |
2943 | 2948 | ||
2944 | __weak | ||
2945 | void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip) | ||
2946 | { | ||
2947 | } | ||
2948 | |||
2949 | 2949 | ||
2950 | /* | 2950 | /* |
2951 | * We assume there is only KVM supporting the callbacks. | 2951 | * We assume there is only KVM supporting the callbacks. |
@@ -2971,15 +2971,15 @@ EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); | |||
2971 | /* | 2971 | /* |
2972 | * Output | 2972 | * Output |
2973 | */ | 2973 | */ |
2974 | static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, | 2974 | static bool perf_output_space(struct perf_buffer *buffer, unsigned long tail, |
2975 | unsigned long offset, unsigned long head) | 2975 | unsigned long offset, unsigned long head) |
2976 | { | 2976 | { |
2977 | unsigned long mask; | 2977 | unsigned long mask; |
2978 | 2978 | ||
2979 | if (!data->writable) | 2979 | if (!buffer->writable) |
2980 | return true; | 2980 | return true; |
2981 | 2981 | ||
2982 | mask = perf_data_size(data) - 1; | 2982 | mask = perf_data_size(buffer) - 1; |
2983 | 2983 | ||
2984 | offset = (offset - tail) & mask; | 2984 | offset = (offset - tail) & mask; |
2985 | head = (head - tail) & mask; | 2985 | head = (head - tail) & mask; |
@@ -2992,7 +2992,7 @@ static bool perf_output_space(struct perf_mmap_data *data, unsigned long tail, | |||
2992 | 2992 | ||
2993 | static void perf_output_wakeup(struct perf_output_handle *handle) | 2993 | static void perf_output_wakeup(struct perf_output_handle *handle) |
2994 | { | 2994 | { |
2995 | atomic_set(&handle->data->poll, POLL_IN); | 2995 | atomic_set(&handle->buffer->poll, POLL_IN); |
2996 | 2996 | ||
2997 | if (handle->nmi) { | 2997 | if (handle->nmi) { |
2998 | handle->event->pending_wakeup = 1; | 2998 | handle->event->pending_wakeup = 1; |
@@ -3012,45 +3012,45 @@ static void perf_output_wakeup(struct perf_output_handle *handle) | |||
3012 | */ | 3012 | */ |
3013 | static void perf_output_get_handle(struct perf_output_handle *handle) | 3013 | static void perf_output_get_handle(struct perf_output_handle *handle) |
3014 | { | 3014 | { |
3015 | struct perf_mmap_data *data = handle->data; | 3015 | struct perf_buffer *buffer = handle->buffer; |
3016 | 3016 | ||
3017 | preempt_disable(); | 3017 | preempt_disable(); |
3018 | local_inc(&data->nest); | 3018 | local_inc(&buffer->nest); |
3019 | handle->wakeup = local_read(&data->wakeup); | 3019 | handle->wakeup = local_read(&buffer->wakeup); |
3020 | } | 3020 | } |
3021 | 3021 | ||
3022 | static void perf_output_put_handle(struct perf_output_handle *handle) | 3022 | static void perf_output_put_handle(struct perf_output_handle *handle) |
3023 | { | 3023 | { |
3024 | struct perf_mmap_data *data = handle->data; | 3024 | struct perf_buffer *buffer = handle->buffer; |
3025 | unsigned long head; | 3025 | unsigned long head; |
3026 | 3026 | ||
3027 | again: | 3027 | again: |
3028 | head = local_read(&data->head); | 3028 | head = local_read(&buffer->head); |
3029 | 3029 | ||
3030 | /* | 3030 | /* |
3031 | * IRQ/NMI can happen here, which means we can miss a head update. | 3031 | * IRQ/NMI can happen here, which means we can miss a head update. |
3032 | */ | 3032 | */ |
3033 | 3033 | ||
3034 | if (!local_dec_and_test(&data->nest)) | 3034 | if (!local_dec_and_test(&buffer->nest)) |
3035 | goto out; | 3035 | goto out; |
3036 | 3036 | ||
3037 | /* | 3037 | /* |
3038 | * Publish the known good head. Rely on the full barrier implied | 3038 | * Publish the known good head. Rely on the full barrier implied |
3039 | * by atomic_dec_and_test() order the data->head read and this | 3039 | * by atomic_dec_and_test() order the buffer->head read and this |
3040 | * write. | 3040 | * write. |
3041 | */ | 3041 | */ |
3042 | data->user_page->data_head = head; | 3042 | buffer->user_page->data_head = head; |
3043 | 3043 | ||
3044 | /* | 3044 | /* |
3045 | * Now check if we missed an update, rely on the (compiler) | 3045 | * Now check if we missed an update, rely on the (compiler) |
3046 | * barrier in atomic_dec_and_test() to re-read data->head. | 3046 | * barrier in atomic_dec_and_test() to re-read buffer->head. |
3047 | */ | 3047 | */ |
3048 | if (unlikely(head != local_read(&data->head))) { | 3048 | if (unlikely(head != local_read(&buffer->head))) { |
3049 | local_inc(&data->nest); | 3049 | local_inc(&buffer->nest); |
3050 | goto again; | 3050 | goto again; |
3051 | } | 3051 | } |
3052 | 3052 | ||
3053 | if (handle->wakeup != local_read(&data->wakeup)) | 3053 | if (handle->wakeup != local_read(&buffer->wakeup)) |
3054 | perf_output_wakeup(handle); | 3054 | perf_output_wakeup(handle); |
3055 | 3055 | ||
3056 | out: | 3056 | out: |
@@ -3070,12 +3070,12 @@ __always_inline void perf_output_copy(struct perf_output_handle *handle, | |||
3070 | buf += size; | 3070 | buf += size; |
3071 | handle->size -= size; | 3071 | handle->size -= size; |
3072 | if (!handle->size) { | 3072 | if (!handle->size) { |
3073 | struct perf_mmap_data *data = handle->data; | 3073 | struct perf_buffer *buffer = handle->buffer; |
3074 | 3074 | ||
3075 | handle->page++; | 3075 | handle->page++; |
3076 | handle->page &= data->nr_pages - 1; | 3076 | handle->page &= buffer->nr_pages - 1; |
3077 | handle->addr = data->data_pages[handle->page]; | 3077 | handle->addr = buffer->data_pages[handle->page]; |
3078 | handle->size = PAGE_SIZE << page_order(data); | 3078 | handle->size = PAGE_SIZE << page_order(buffer); |
3079 | } | 3079 | } |
3080 | } while (len); | 3080 | } while (len); |
3081 | } | 3081 | } |
@@ -3084,7 +3084,7 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3084 | struct perf_event *event, unsigned int size, | 3084 | struct perf_event *event, unsigned int size, |
3085 | int nmi, int sample) | 3085 | int nmi, int sample) |
3086 | { | 3086 | { |
3087 | struct perf_mmap_data *data; | 3087 | struct perf_buffer *buffer; |
3088 | unsigned long tail, offset, head; | 3088 | unsigned long tail, offset, head; |
3089 | int have_lost; | 3089 | int have_lost; |
3090 | struct { | 3090 | struct { |
@@ -3100,19 +3100,19 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3100 | if (event->parent) | 3100 | if (event->parent) |
3101 | event = event->parent; | 3101 | event = event->parent; |
3102 | 3102 | ||
3103 | data = rcu_dereference(event->data); | 3103 | buffer = rcu_dereference(event->buffer); |
3104 | if (!data) | 3104 | if (!buffer) |
3105 | goto out; | 3105 | goto out; |
3106 | 3106 | ||
3107 | handle->data = data; | 3107 | handle->buffer = buffer; |
3108 | handle->event = event; | 3108 | handle->event = event; |
3109 | handle->nmi = nmi; | 3109 | handle->nmi = nmi; |
3110 | handle->sample = sample; | 3110 | handle->sample = sample; |
3111 | 3111 | ||
3112 | if (!data->nr_pages) | 3112 | if (!buffer->nr_pages) |
3113 | goto out; | 3113 | goto out; |
3114 | 3114 | ||
3115 | have_lost = local_read(&data->lost); | 3115 | have_lost = local_read(&buffer->lost); |
3116 | if (have_lost) | 3116 | if (have_lost) |
3117 | size += sizeof(lost_event); | 3117 | size += sizeof(lost_event); |
3118 | 3118 | ||
@@ -3124,30 +3124,30 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3124 | * tail pointer. So that all reads will be completed before the | 3124 | * tail pointer. So that all reads will be completed before the |
3125 | * write is issued. | 3125 | * write is issued. |
3126 | */ | 3126 | */ |
3127 | tail = ACCESS_ONCE(data->user_page->data_tail); | 3127 | tail = ACCESS_ONCE(buffer->user_page->data_tail); |
3128 | smp_rmb(); | 3128 | smp_rmb(); |
3129 | offset = head = local_read(&data->head); | 3129 | offset = head = local_read(&buffer->head); |
3130 | head += size; | 3130 | head += size; |
3131 | if (unlikely(!perf_output_space(data, tail, offset, head))) | 3131 | if (unlikely(!perf_output_space(buffer, tail, offset, head))) |
3132 | goto fail; | 3132 | goto fail; |
3133 | } while (local_cmpxchg(&data->head, offset, head) != offset); | 3133 | } while (local_cmpxchg(&buffer->head, offset, head) != offset); |
3134 | 3134 | ||
3135 | if (head - local_read(&data->wakeup) > data->watermark) | 3135 | if (head - local_read(&buffer->wakeup) > buffer->watermark) |
3136 | local_add(data->watermark, &data->wakeup); | 3136 | local_add(buffer->watermark, &buffer->wakeup); |
3137 | 3137 | ||
3138 | handle->page = offset >> (PAGE_SHIFT + page_order(data)); | 3138 | handle->page = offset >> (PAGE_SHIFT + page_order(buffer)); |
3139 | handle->page &= data->nr_pages - 1; | 3139 | handle->page &= buffer->nr_pages - 1; |
3140 | handle->size = offset & ((PAGE_SIZE << page_order(data)) - 1); | 3140 | handle->size = offset & ((PAGE_SIZE << page_order(buffer)) - 1); |
3141 | handle->addr = data->data_pages[handle->page]; | 3141 | handle->addr = buffer->data_pages[handle->page]; |
3142 | handle->addr += handle->size; | 3142 | handle->addr += handle->size; |
3143 | handle->size = (PAGE_SIZE << page_order(data)) - handle->size; | 3143 | handle->size = (PAGE_SIZE << page_order(buffer)) - handle->size; |
3144 | 3144 | ||
3145 | if (have_lost) { | 3145 | if (have_lost) { |
3146 | lost_event.header.type = PERF_RECORD_LOST; | 3146 | lost_event.header.type = PERF_RECORD_LOST; |
3147 | lost_event.header.misc = 0; | 3147 | lost_event.header.misc = 0; |
3148 | lost_event.header.size = sizeof(lost_event); | 3148 | lost_event.header.size = sizeof(lost_event); |
3149 | lost_event.id = event->id; | 3149 | lost_event.id = event->id; |
3150 | lost_event.lost = local_xchg(&data->lost, 0); | 3150 | lost_event.lost = local_xchg(&buffer->lost, 0); |
3151 | 3151 | ||
3152 | perf_output_put(handle, lost_event); | 3152 | perf_output_put(handle, lost_event); |
3153 | } | 3153 | } |
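The reservation loop above claims `size` bytes by advancing the head with a compare-and-swap and bails out if the unread region would be overwritten; because nr_pages is a power of two, wrap-around is a bitmask rather than a modulo. A simplified user-space model, with a C11 CAS standing in for local_cmpxchg():

    #include <stdatomic.h>
    #include <stdio.h>

    #define BUF_SIZE 4096   /* power of two, so wrap-around is a bitmask */

    static atomic_ulong head;

    /* Claim `size` bytes: advance head with a CAS, refuse to overwrite
     * anything the consumer (at `tail`) has not read yet. */
    static int output_begin(unsigned long tail, unsigned long size,
                            unsigned long *offset)
    {
        unsigned long old, new;

        do {
            old = atomic_load(&head);
            new = old + size;
            if (new - tail > BUF_SIZE)
                return -1;                      /* no room left */
        } while (!atomic_compare_exchange_weak(&head, &old, new));

        *offset = old & (BUF_SIZE - 1);         /* bitmask, not modulo */
        return 0;
    }

    int main(void)
    {
        unsigned long offset;

        atomic_init(&head, 0);
        while (output_begin(0, 512, &offset) == 0)
            printf("reserved 512 bytes at offset %lu\n", offset);
        printf("buffer full\n");
        return 0;
    }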
@@ -3155,7 +3155,7 @@ int perf_output_begin(struct perf_output_handle *handle, | |||
3155 | return 0; | 3155 | return 0; |
3156 | 3156 | ||
3157 | fail: | 3157 | fail: |
3158 | local_inc(&data->lost); | 3158 | local_inc(&buffer->lost); |
3159 | perf_output_put_handle(handle); | 3159 | perf_output_put_handle(handle); |
3160 | out: | 3160 | out: |
3161 | rcu_read_unlock(); | 3161 | rcu_read_unlock(); |
@@ -3166,15 +3166,15 @@ out: | |||
3166 | void perf_output_end(struct perf_output_handle *handle) | 3166 | void perf_output_end(struct perf_output_handle *handle) |
3167 | { | 3167 | { |
3168 | struct perf_event *event = handle->event; | 3168 | struct perf_event *event = handle->event; |
3169 | struct perf_mmap_data *data = handle->data; | 3169 | struct perf_buffer *buffer = handle->buffer; |
3170 | 3170 | ||
3171 | int wakeup_events = event->attr.wakeup_events; | 3171 | int wakeup_events = event->attr.wakeup_events; |
3172 | 3172 | ||
3173 | if (handle->sample && wakeup_events) { | 3173 | if (handle->sample && wakeup_events) { |
3174 | int events = local_inc_return(&data->events); | 3174 | int events = local_inc_return(&buffer->events); |
3175 | if (events >= wakeup_events) { | 3175 | if (events >= wakeup_events) { |
3176 | local_sub(wakeup_events, &data->events); | 3176 | local_sub(wakeup_events, &buffer->events); |
3177 | local_inc(&data->wakeup); | 3177 | local_inc(&buffer->wakeup); |
3178 | } | 3178 | } |
3179 | } | 3179 | } |
3180 | 3180 | ||
@@ -3211,7 +3211,7 @@ static void perf_output_read_one(struct perf_output_handle *handle, | |||
3211 | u64 values[4]; | 3211 | u64 values[4]; |
3212 | int n = 0; | 3212 | int n = 0; |
3213 | 3213 | ||
3214 | values[n++] = atomic64_read(&event->count); | 3214 | values[n++] = perf_event_count(event); |
3215 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { | 3215 | if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { |
3216 | values[n++] = event->total_time_enabled + | 3216 | values[n++] = event->total_time_enabled + |
3217 | atomic64_read(&event->child_total_time_enabled); | 3217 | atomic64_read(&event->child_total_time_enabled); |
@@ -3248,7 +3248,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
3248 | if (leader != event) | 3248 | if (leader != event) |
3249 | leader->pmu->read(leader); | 3249 | leader->pmu->read(leader); |
3250 | 3250 | ||
3251 | values[n++] = atomic64_read(&leader->count); | 3251 | values[n++] = perf_event_count(leader); |
3252 | if (read_format & PERF_FORMAT_ID) | 3252 | if (read_format & PERF_FORMAT_ID) |
3253 | values[n++] = primary_event_id(leader); | 3253 | values[n++] = primary_event_id(leader); |
3254 | 3254 | ||
@@ -3260,7 +3260,7 @@ static void perf_output_read_group(struct perf_output_handle *handle, | |||
3260 | if (sub != event) | 3260 | if (sub != event) |
3261 | sub->pmu->read(sub); | 3261 | sub->pmu->read(sub); |
3262 | 3262 | ||
3263 | values[n++] = atomic64_read(&sub->count); | 3263 | values[n++] = perf_event_count(sub); |
3264 | if (read_format & PERF_FORMAT_ID) | 3264 | if (read_format & PERF_FORMAT_ID) |
3265 | values[n++] = primary_event_id(sub); | 3265 | values[n++] = primary_event_id(sub); |
3266 | 3266 | ||
@@ -3491,7 +3491,7 @@ perf_event_read_event(struct perf_event *event, | |||
3491 | /* | 3491 | /* |
3492 | * task tracking -- fork/exit | 3492 | * task tracking -- fork/exit |
3493 | * | 3493 | * |
3494 | * enabled by: attr.comm | attr.mmap | attr.task | 3494 | * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task |
3495 | */ | 3495 | */ |
3496 | 3496 | ||
3497 | struct perf_task_event { | 3497 | struct perf_task_event { |
@@ -3541,7 +3541,8 @@ static int perf_event_task_match(struct perf_event *event) | |||
3541 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 3541 | if (event->cpu != -1 && event->cpu != smp_processor_id()) |
3542 | return 0; | 3542 | return 0; |
3543 | 3543 | ||
3544 | if (event->attr.comm || event->attr.mmap || event->attr.task) | 3544 | if (event->attr.comm || event->attr.mmap || |
3545 | event->attr.mmap_data || event->attr.task) | ||
3545 | return 1; | 3546 | return 1; |
3546 | 3547 | ||
3547 | return 0; | 3548 | return 0; |
@@ -3766,7 +3767,8 @@ static void perf_event_mmap_output(struct perf_event *event, | |||
3766 | } | 3767 | } |
3767 | 3768 | ||
3768 | static int perf_event_mmap_match(struct perf_event *event, | 3769 | static int perf_event_mmap_match(struct perf_event *event, |
3769 | struct perf_mmap_event *mmap_event) | 3770 | struct perf_mmap_event *mmap_event, |
3771 | int executable) | ||
3770 | { | 3772 | { |
3771 | if (event->state < PERF_EVENT_STATE_INACTIVE) | 3773 | if (event->state < PERF_EVENT_STATE_INACTIVE) |
3772 | return 0; | 3774 | return 0; |
@@ -3774,19 +3776,21 @@ static int perf_event_mmap_match(struct perf_event *event, | |||
3774 | if (event->cpu != -1 && event->cpu != smp_processor_id()) | 3776 | if (event->cpu != -1 && event->cpu != smp_processor_id()) |
3775 | return 0; | 3777 | return 0; |
3776 | 3778 | ||
3777 | if (event->attr.mmap) | 3779 | if ((!executable && event->attr.mmap_data) || |
3780 | (executable && event->attr.mmap)) | ||
3778 | return 1; | 3781 | return 1; |
3779 | 3782 | ||
3780 | return 0; | 3783 | return 0; |
3781 | } | 3784 | } |
3782 | 3785 | ||
3783 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, | 3786 | static void perf_event_mmap_ctx(struct perf_event_context *ctx, |
3784 | struct perf_mmap_event *mmap_event) | 3787 | struct perf_mmap_event *mmap_event, |
3788 | int executable) | ||
3785 | { | 3789 | { |
3786 | struct perf_event *event; | 3790 | struct perf_event *event; |
3787 | 3791 | ||
3788 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { | 3792 | list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { |
3789 | if (perf_event_mmap_match(event, mmap_event)) | 3793 | if (perf_event_mmap_match(event, mmap_event, executable)) |
3790 | perf_event_mmap_output(event, mmap_event); | 3794 | perf_event_mmap_output(event, mmap_event); |
3791 | } | 3795 | } |
3792 | } | 3796 | } |
@@ -3830,6 +3834,14 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
3830 | if (!vma->vm_mm) { | 3834 | if (!vma->vm_mm) { |
3831 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); | 3835 | name = strncpy(tmp, "[vdso]", sizeof(tmp)); |
3832 | goto got_name; | 3836 | goto got_name; |
3837 | } else if (vma->vm_start <= vma->vm_mm->start_brk && | ||
3838 | vma->vm_end >= vma->vm_mm->brk) { | ||
3839 | name = strncpy(tmp, "[heap]", sizeof(tmp)); | ||
3840 | goto got_name; | ||
3841 | } else if (vma->vm_start <= vma->vm_mm->start_stack && | ||
3842 | vma->vm_end >= vma->vm_mm->start_stack) { | ||
3843 | name = strncpy(tmp, "[stack]", sizeof(tmp)); | ||
3844 | goto got_name; | ||
3833 | } | 3845 | } |
3834 | 3846 | ||
3835 | name = strncpy(tmp, "//anon", sizeof(tmp)); | 3847 | name = strncpy(tmp, "//anon", sizeof(tmp)); |
@@ -3846,17 +3858,17 @@ got_name: | |||
3846 | 3858 | ||
3847 | rcu_read_lock(); | 3859 | rcu_read_lock(); |
3848 | cpuctx = &get_cpu_var(perf_cpu_context); | 3860 | cpuctx = &get_cpu_var(perf_cpu_context); |
3849 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event); | 3861 | perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); |
3850 | ctx = rcu_dereference(current->perf_event_ctxp); | 3862 | ctx = rcu_dereference(current->perf_event_ctxp); |
3851 | if (ctx) | 3863 | if (ctx) |
3852 | perf_event_mmap_ctx(ctx, mmap_event); | 3864 | perf_event_mmap_ctx(ctx, mmap_event, vma->vm_flags & VM_EXEC); |
3853 | put_cpu_var(perf_cpu_context); | 3865 | put_cpu_var(perf_cpu_context); |
3854 | rcu_read_unlock(); | 3866 | rcu_read_unlock(); |
3855 | 3867 | ||
3856 | kfree(buf); | 3868 | kfree(buf); |
3857 | } | 3869 | } |
3858 | 3870 | ||
3859 | void __perf_event_mmap(struct vm_area_struct *vma) | 3871 | void perf_event_mmap(struct vm_area_struct *vma) |
3860 | { | 3872 | { |
3861 | struct perf_mmap_event mmap_event; | 3873 | struct perf_mmap_event mmap_event; |
3862 | 3874 | ||
@@ -4018,14 +4030,14 @@ static u64 perf_swevent_set_period(struct perf_event *event) | |||
4018 | hwc->last_period = hwc->sample_period; | 4030 | hwc->last_period = hwc->sample_period; |
4019 | 4031 | ||
4020 | again: | 4032 | again: |
4021 | old = val = atomic64_read(&hwc->period_left); | 4033 | old = val = local64_read(&hwc->period_left); |
4022 | if (val < 0) | 4034 | if (val < 0) |
4023 | return 0; | 4035 | return 0; |
4024 | 4036 | ||
4025 | nr = div64_u64(period + val, period); | 4037 | nr = div64_u64(period + val, period); |
4026 | offset = nr * period; | 4038 | offset = nr * period; |
4027 | val -= offset; | 4039 | val -= offset; |
4028 | if (atomic64_cmpxchg(&hwc->period_left, old, val) != old) | 4040 | if (local64_cmpxchg(&hwc->period_left, old, val) != old) |
4029 | goto again; | 4041 | goto again; |
4030 | 4042 | ||
4031 | return nr; | 4043 | return nr; |
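perf_swevent_set_period() works out how many whole sample periods have elapsed once period_left reaches zero or above, and pulls period_left back below zero by that amount. The arithmetic, stand-alone, with div64_u64() replaced by plain division:

    #include <stdint.h>
    #include <stdio.h>

    /* Once period_left has reached >= 0, count the elapsed periods and
     * pull period_left back below zero by that many periods. */
    static uint64_t set_period(int64_t *period_left, uint64_t period)
    {
        int64_t val = *period_left;
        uint64_t nr, offset;

        if (val < 0)
            return 0;                           /* not due yet */

        nr = (period + (uint64_t)val) / period; /* div64_u64() in the kernel */
        offset = nr * period;
        *period_left = val - (int64_t)offset;

        return nr;                              /* overflows to account for */
    }

    int main(void)
    {
        int64_t left = 250;                     /* overshot the period by 250 */
        uint64_t nr = set_period(&left, 1000);

        /* 1 -750: one overflow, 750 more events until the next one */
        printf("%llu %lld\n", (unsigned long long)nr, (long long)left);
        return 0;
    }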
@@ -4064,7 +4076,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, | |||
4064 | { | 4076 | { |
4065 | struct hw_perf_event *hwc = &event->hw; | 4077 | struct hw_perf_event *hwc = &event->hw; |
4066 | 4078 | ||
4067 | atomic64_add(nr, &event->count); | 4079 | local64_add(nr, &event->count); |
4068 | 4080 | ||
4069 | if (!regs) | 4081 | if (!regs) |
4070 | return; | 4082 | return; |
@@ -4075,7 +4087,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr, | |||
4075 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) | 4087 | if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) |
4076 | return perf_swevent_overflow(event, 1, nmi, data, regs); | 4088 | return perf_swevent_overflow(event, 1, nmi, data, regs); |
4077 | 4089 | ||
4078 | if (atomic64_add_negative(nr, &hwc->period_left)) | 4090 | if (local64_add_negative(nr, &hwc->period_left)) |
4079 | return; | 4091 | return; |
4080 | 4092 | ||
4081 | perf_swevent_overflow(event, 0, nmi, data, regs); | 4093 | perf_swevent_overflow(event, 0, nmi, data, regs); |
@@ -4213,14 +4225,12 @@ int perf_swevent_get_recursion_context(void) | |||
4213 | } | 4225 | } |
4214 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); | 4226 | EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context); |
4215 | 4227 | ||
4216 | void perf_swevent_put_recursion_context(int rctx) | 4228 | void inline perf_swevent_put_recursion_context(int rctx) |
4217 | { | 4229 | { |
4218 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | 4230 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); |
4219 | barrier(); | 4231 | barrier(); |
4220 | cpuctx->recursion[rctx]--; | 4232 | cpuctx->recursion[rctx]--; |
4221 | } | 4233 | } |
4222 | EXPORT_SYMBOL_GPL(perf_swevent_put_recursion_context); | ||
4223 | |||
4224 | 4234 | ||
4225 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 4235 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, |
4226 | struct pt_regs *regs, u64 addr) | 4236 | struct pt_regs *regs, u64 addr) |
@@ -4368,8 +4378,8 @@ static void cpu_clock_perf_event_update(struct perf_event *event) | |||
4368 | u64 now; | 4378 | u64 now; |
4369 | 4379 | ||
4370 | now = cpu_clock(cpu); | 4380 | now = cpu_clock(cpu); |
4371 | prev = atomic64_xchg(&event->hw.prev_count, now); | 4381 | prev = local64_xchg(&event->hw.prev_count, now); |
4372 | atomic64_add(now - prev, &event->count); | 4382 | local64_add(now - prev, &event->count); |
4373 | } | 4383 | } |
4374 | 4384 | ||
4375 | static int cpu_clock_perf_event_enable(struct perf_event *event) | 4385 | static int cpu_clock_perf_event_enable(struct perf_event *event) |
@@ -4377,7 +4387,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event) | |||
4377 | struct hw_perf_event *hwc = &event->hw; | 4387 | struct hw_perf_event *hwc = &event->hw; |
4378 | int cpu = raw_smp_processor_id(); | 4388 | int cpu = raw_smp_processor_id(); |
4379 | 4389 | ||
4380 | atomic64_set(&hwc->prev_count, cpu_clock(cpu)); | 4390 | local64_set(&hwc->prev_count, cpu_clock(cpu)); |
4381 | perf_swevent_start_hrtimer(event); | 4391 | perf_swevent_start_hrtimer(event); |
4382 | 4392 | ||
4383 | return 0; | 4393 | return 0; |
@@ -4409,9 +4419,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now) | |||
4409 | u64 prev; | 4419 | u64 prev; |
4410 | s64 delta; | 4420 | s64 delta; |
4411 | 4421 | ||
4412 | prev = atomic64_xchg(&event->hw.prev_count, now); | 4422 | prev = local64_xchg(&event->hw.prev_count, now); |
4413 | delta = now - prev; | 4423 | delta = now - prev; |
4414 | atomic64_add(delta, &event->count); | 4424 | local64_add(delta, &event->count); |
4415 | } | 4425 | } |
4416 | 4426 | ||
4417 | static int task_clock_perf_event_enable(struct perf_event *event) | 4427 | static int task_clock_perf_event_enable(struct perf_event *event) |
@@ -4421,7 +4431,7 @@ static int task_clock_perf_event_enable(struct perf_event *event) | |||
4421 | 4431 | ||
4422 | now = event->ctx->time; | 4432 | now = event->ctx->time; |
4423 | 4433 | ||
4424 | atomic64_set(&hwc->prev_count, now); | 4434 | local64_set(&hwc->prev_count, now); |
4425 | 4435 | ||
4426 | perf_swevent_start_hrtimer(event); | 4436 | perf_swevent_start_hrtimer(event); |
4427 | 4437 | ||
@@ -4601,7 +4611,7 @@ static int perf_tp_event_match(struct perf_event *event, | |||
4601 | } | 4611 | } |
4602 | 4612 | ||
4603 | void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, | 4613 | void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, |
4604 | struct pt_regs *regs, struct hlist_head *head) | 4614 | struct pt_regs *regs, struct hlist_head *head, int rctx) |
4605 | { | 4615 | { |
4606 | struct perf_sample_data data; | 4616 | struct perf_sample_data data; |
4607 | struct perf_event *event; | 4617 | struct perf_event *event; |
@@ -4615,12 +4625,12 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, | |||
4615 | perf_sample_data_init(&data, addr); | 4625 | perf_sample_data_init(&data, addr); |
4616 | data.raw = &raw; | 4626 | data.raw = &raw; |
4617 | 4627 | ||
4618 | rcu_read_lock(); | ||
4619 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { | 4628 | hlist_for_each_entry_rcu(event, node, head, hlist_entry) { |
4620 | if (perf_tp_event_match(event, &data, regs)) | 4629 | if (perf_tp_event_match(event, &data, regs)) |
4621 | perf_swevent_add(event, count, 1, &data, regs); | 4630 | perf_swevent_add(event, count, 1, &data, regs); |
4622 | } | 4631 | } |
4623 | rcu_read_unlock(); | 4632 | |
4633 | perf_swevent_put_recursion_context(rctx); | ||
4624 | } | 4634 | } |
4625 | EXPORT_SYMBOL_GPL(perf_tp_event); | 4635 | EXPORT_SYMBOL_GPL(perf_tp_event); |
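
The signature change above hands the recursion context (rctx) from the tracepoint caller into perf_tp_event(), which now releases it itself. A hedged sketch of that get/use/put protocol in plain C, using a single global array here where the kernel uses per-CPU counters and real nesting levels:

    #include <stdio.h>

    /* One slot per nesting level (task, softirq, hardirq, NMI in the kernel);
     * a non-zero slot means an event is already being handled there. */
    static int recursion[4];

    static int get_recursion_context(int level)
    {
            if (recursion[level])
                    return -1;              /* recursing: drop the event */
            recursion[level] = 1;
            return level;                   /* this is the "rctx" handle */
    }

    static void put_recursion_context(int rctx)
    {
            recursion[rctx] = 0;            /* released by the consumer */
    }

    static void tp_event(int rctx)
    {
            printf("handling tracepoint event in context %d\n", rctx);
            put_recursion_context(rctx);    /* as in the hunk above */
    }

    int main(void)
    {
            int rctx = get_recursion_context(0);

            if (rctx >= 0)
                    tp_event(rctx);
            return 0;
    }
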
4626 | 4636 | ||
@@ -4864,7 +4874,7 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
4864 | hwc->sample_period = 1; | 4874 | hwc->sample_period = 1; |
4865 | hwc->last_period = hwc->sample_period; | 4875 | hwc->last_period = hwc->sample_period; |
4866 | 4876 | ||
4867 | atomic64_set(&hwc->period_left, hwc->sample_period); | 4877 | local64_set(&hwc->period_left, hwc->sample_period); |
4868 | 4878 | ||
4869 | /* | 4879 | /* |
4870 | * we currently do not support PERF_FORMAT_GROUP on inherited events | 4880 | * we currently do not support PERF_FORMAT_GROUP on inherited events |
@@ -4913,7 +4923,7 @@ done: | |||
4913 | 4923 | ||
4914 | if (!event->parent) { | 4924 | if (!event->parent) { |
4915 | atomic_inc(&nr_events); | 4925 | atomic_inc(&nr_events); |
4916 | if (event->attr.mmap) | 4926 | if (event->attr.mmap || event->attr.mmap_data) |
4917 | atomic_inc(&nr_mmap_events); | 4927 | atomic_inc(&nr_mmap_events); |
4918 | if (event->attr.comm) | 4928 | if (event->attr.comm) |
4919 | atomic_inc(&nr_comm_events); | 4929 | atomic_inc(&nr_comm_events); |
@@ -5007,7 +5017,7 @@ err_size: | |||
5007 | static int | 5017 | static int |
5008 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) | 5018 | perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
5009 | { | 5019 | { |
5010 | struct perf_mmap_data *data = NULL, *old_data = NULL; | 5020 | struct perf_buffer *buffer = NULL, *old_buffer = NULL; |
5011 | int ret = -EINVAL; | 5021 | int ret = -EINVAL; |
5012 | 5022 | ||
5013 | if (!output_event) | 5023 | if (!output_event) |
@@ -5037,19 +5047,19 @@ set: | |||
5037 | 5047 | ||
5038 | if (output_event) { | 5048 | if (output_event) { |
5039 | /* get the buffer we want to redirect to */ | 5049 | /* get the buffer we want to redirect to */ |
5040 | data = perf_mmap_data_get(output_event); | 5050 | buffer = perf_buffer_get(output_event); |
5041 | if (!data) | 5051 | if (!buffer) |
5042 | goto unlock; | 5052 | goto unlock; |
5043 | } | 5053 | } |
5044 | 5054 | ||
5045 | old_data = event->data; | 5055 | old_buffer = event->buffer; |
5046 | rcu_assign_pointer(event->data, data); | 5056 | rcu_assign_pointer(event->buffer, buffer); |
5047 | ret = 0; | 5057 | ret = 0; |
5048 | unlock: | 5058 | unlock: |
5049 | mutex_unlock(&event->mmap_mutex); | 5059 | mutex_unlock(&event->mmap_mutex); |
5050 | 5060 | ||
5051 | if (old_data) | 5061 | if (old_buffer) |
5052 | perf_mmap_data_put(old_data); | 5062 | perf_buffer_put(old_buffer); |
5053 | out: | 5063 | out: |
5054 | return ret; | 5064 | return ret; |
5055 | } | 5065 | } |
@@ -5298,7 +5308,7 @@ inherit_event(struct perf_event *parent_event, | |||
5298 | hwc->sample_period = sample_period; | 5308 | hwc->sample_period = sample_period; |
5299 | hwc->last_period = sample_period; | 5309 | hwc->last_period = sample_period; |
5300 | 5310 | ||
5301 | atomic64_set(&hwc->period_left, sample_period); | 5311 | local64_set(&hwc->period_left, sample_period); |
5302 | } | 5312 | } |
5303 | 5313 | ||
5304 | child_event->overflow_handler = parent_event->overflow_handler; | 5314 | child_event->overflow_handler = parent_event->overflow_handler; |
@@ -5359,12 +5369,12 @@ static void sync_child_event(struct perf_event *child_event, | |||
5359 | if (child_event->attr.inherit_stat) | 5369 | if (child_event->attr.inherit_stat) |
5360 | perf_event_read_event(child_event, child); | 5370 | perf_event_read_event(child_event, child); |
5361 | 5371 | ||
5362 | child_val = atomic64_read(&child_event->count); | 5372 | child_val = perf_event_count(child_event); |
5363 | 5373 | ||
5364 | /* | 5374 | /* |
5365 | * Add back the child's count to the parent's count: | 5375 | * Add back the child's count to the parent's count: |
5366 | */ | 5376 | */ |
5367 | atomic64_add(child_val, &parent_event->count); | 5377 | atomic64_add(child_val, &parent_event->child_count); |
5368 | atomic64_add(child_event->total_time_enabled, | 5378 | atomic64_add(child_event->total_time_enabled, |
5369 | &parent_event->child_total_time_enabled); | 5379 | &parent_event->child_total_time_enabled); |
5370 | atomic64_add(child_event->total_time_running, | 5380 | atomic64_add(child_event->total_time_running, |
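
The add-back above now lands in a separate child_count field rather than in event->count, so the fast local64_t counter is only ever written by its owner; readers then sum the two values, which is presumably what the perf_event_count() helper used in this hunk does. A simplified sketch of that split-counter idea, with plain fields in place of local64_t/atomic64_t:

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the perf event counters: "count" is only
     * written by the owning context, child totals are folded into a
     * separate accumulator, and the visible value is the sum. */
    struct counter {
            int64_t count;          /* plays the role of local64_t count */
            int64_t child_count;    /* plays the role of atomic64_t child_count */
    };

    static int64_t counter_total(const struct counter *c)
    {
            return c->count + c->child_count;
    }

    static void fold_child(struct counter *parent, int64_t child_total)
    {
            parent->child_count += child_total;     /* the add-back above */
    }

    int main(void)
    {
            struct counter parent = { .count = 1000, .child_count = 0 };

            fold_child(&parent, 250);               /* hypothetical child value */
            printf("total = %lld\n", (long long)counter_total(&parent));
            return 0;
    }
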
diff --git a/kernel/sched.c b/kernel/sched.c index f52a8801b7a2..265cf3a2b5d8 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3726,7 +3726,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | |||
3726 | * off of preempt_enable. Kernel preemptions off return from interrupt | 3726 | * off of preempt_enable. Kernel preemptions off return from interrupt |
3727 | * occur there and call schedule directly. | 3727 | * occur there and call schedule directly. |
3728 | */ | 3728 | */ |
3729 | asmlinkage void __sched preempt_schedule(void) | 3729 | asmlinkage void __sched notrace preempt_schedule(void) |
3730 | { | 3730 | { |
3731 | struct thread_info *ti = current_thread_info(); | 3731 | struct thread_info *ti = current_thread_info(); |
3732 | 3732 | ||
@@ -3738,9 +3738,9 @@ asmlinkage void __sched preempt_schedule(void) | |||
3738 | return; | 3738 | return; |
3739 | 3739 | ||
3740 | do { | 3740 | do { |
3741 | add_preempt_count(PREEMPT_ACTIVE); | 3741 | add_preempt_count_notrace(PREEMPT_ACTIVE); |
3742 | schedule(); | 3742 | schedule(); |
3743 | sub_preempt_count(PREEMPT_ACTIVE); | 3743 | sub_preempt_count_notrace(PREEMPT_ACTIVE); |
3744 | 3744 | ||
3745 | /* | 3745 | /* |
3746 | * Check again in case we missed a preemption opportunity | 3746 | * Check again in case we missed a preemption opportunity |
diff --git a/kernel/softlockup.c b/kernel/softlockup.c deleted file mode 100644 index 4b493f67dcb5..000000000000 --- a/kernel/softlockup.c +++ /dev/null | |||
@@ -1,293 +0,0 @@ | |||
1 | /* | ||
2 | * Detect Soft Lockups | ||
3 | * | ||
4 | * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc. | ||
5 | * | ||
6 | * this code detects soft lockups: incidents in which, on a CPU, | ||
7 | * the kernel does not reschedule for 10 seconds or more. | ||
8 | */ | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/cpu.h> | ||
11 | #include <linux/nmi.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/freezer.h> | ||
15 | #include <linux/kthread.h> | ||
16 | #include <linux/lockdep.h> | ||
17 | #include <linux/notifier.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/sysctl.h> | ||
20 | |||
21 | #include <asm/irq_regs.h> | ||
22 | |||
23 | static DEFINE_SPINLOCK(print_lock); | ||
24 | |||
25 | static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */ | ||
26 | static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */ | ||
27 | static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); | ||
28 | static DEFINE_PER_CPU(bool, softlock_touch_sync); | ||
29 | |||
30 | static int __read_mostly did_panic; | ||
31 | int __read_mostly softlockup_thresh = 60; | ||
32 | |||
33 | /* | ||
34 | * Should we panic (and reboot, if panic_timeout= is set) when a | ||
35 | * soft-lockup occurs: | ||
36 | */ | ||
37 | unsigned int __read_mostly softlockup_panic = | ||
38 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | ||
39 | |||
40 | static int __init softlockup_panic_setup(char *str) | ||
41 | { | ||
42 | softlockup_panic = simple_strtoul(str, NULL, 0); | ||
43 | |||
44 | return 1; | ||
45 | } | ||
46 | __setup("softlockup_panic=", softlockup_panic_setup); | ||
47 | |||
48 | static int | ||
49 | softlock_panic(struct notifier_block *this, unsigned long event, void *ptr) | ||
50 | { | ||
51 | did_panic = 1; | ||
52 | |||
53 | return NOTIFY_DONE; | ||
54 | } | ||
55 | |||
56 | static struct notifier_block panic_block = { | ||
57 | .notifier_call = softlock_panic, | ||
58 | }; | ||
59 | |||
60 | /* | ||
61 | * Returns seconds, approximately. We don't need nanosecond | ||
62 | * resolution, and we don't need to waste time with a big divide when | ||
63 | * 2^30ns == 1.074s. | ||
64 | */ | ||
65 | static unsigned long get_timestamp(int this_cpu) | ||
66 | { | ||
67 | return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ | ||
68 | } | ||
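
As the comment notes, get_timestamp() trades precision for speed: shifting right by 30 divides by 2^30 (about 1.074e9) instead of 1e9, so the "seconds" it returns run roughly 7% slow, which is fine for a watchdog. A quick standalone check of that approximation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t ns = 60ULL * 1000000000ULL;    /* 60 s of nanoseconds */
            uint64_t approx = ns >> 30;             /* the get_timestamp() shortcut */
            uint64_t exact = ns / 1000000000ULL;

            /* prints exact = 60, approx = 55: about 7% low, but cheap */
            printf("exact = %llu s, approx = %llu s\n",
                   (unsigned long long)exact, (unsigned long long)approx);
            return 0;
    }
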
69 | |||
70 | static void __touch_softlockup_watchdog(void) | ||
71 | { | ||
72 | int this_cpu = raw_smp_processor_id(); | ||
73 | |||
74 | __raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu); | ||
75 | } | ||
76 | |||
77 | void touch_softlockup_watchdog(void) | ||
78 | { | ||
79 | __raw_get_cpu_var(softlockup_touch_ts) = 0; | ||
80 | } | ||
81 | EXPORT_SYMBOL(touch_softlockup_watchdog); | ||
82 | |||
83 | void touch_softlockup_watchdog_sync(void) | ||
84 | { | ||
85 | __raw_get_cpu_var(softlock_touch_sync) = true; | ||
86 | __raw_get_cpu_var(softlockup_touch_ts) = 0; | ||
87 | } | ||
88 | |||
89 | void touch_all_softlockup_watchdogs(void) | ||
90 | { | ||
91 | int cpu; | ||
92 | |||
93 | /* Cause each CPU to re-update its timestamp rather than complain */ | ||
94 | for_each_online_cpu(cpu) | ||
95 | per_cpu(softlockup_touch_ts, cpu) = 0; | ||
96 | } | ||
97 | EXPORT_SYMBOL(touch_all_softlockup_watchdogs); | ||
98 | |||
99 | int proc_dosoftlockup_thresh(struct ctl_table *table, int write, | ||
100 | void __user *buffer, | ||
101 | size_t *lenp, loff_t *ppos) | ||
102 | { | ||
103 | touch_all_softlockup_watchdogs(); | ||
104 | return proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
105 | } | ||
106 | |||
107 | /* | ||
108 | * This callback runs from the timer interrupt, and checks | ||
109 | * whether the watchdog thread has hung or not: | ||
110 | */ | ||
111 | void softlockup_tick(void) | ||
112 | { | ||
113 | int this_cpu = smp_processor_id(); | ||
114 | unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu); | ||
115 | unsigned long print_ts; | ||
116 | struct pt_regs *regs = get_irq_regs(); | ||
117 | unsigned long now; | ||
118 | |||
119 | /* Is detection switched off? */ | ||
120 | if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) { | ||
121 | /* Be sure we don't false trigger if switched back on */ | ||
122 | if (touch_ts) | ||
123 | per_cpu(softlockup_touch_ts, this_cpu) = 0; | ||
124 | return; | ||
125 | } | ||
126 | |||
127 | if (touch_ts == 0) { | ||
128 | if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) { | ||
129 | /* | ||
130 | * If the time stamp was touched atomically | ||
131 | * make sure the scheduler tick is up to date. | ||
132 | */ | ||
133 | per_cpu(softlock_touch_sync, this_cpu) = false; | ||
134 | sched_clock_tick(); | ||
135 | } | ||
136 | __touch_softlockup_watchdog(); | ||
137 | return; | ||
138 | } | ||
139 | |||
140 | print_ts = per_cpu(softlockup_print_ts, this_cpu); | ||
141 | |||
142 | /* report at most once a second */ | ||
143 | if (print_ts == touch_ts || did_panic) | ||
144 | return; | ||
145 | |||
146 | /* do not print during early bootup: */ | ||
147 | if (unlikely(system_state != SYSTEM_RUNNING)) { | ||
148 | __touch_softlockup_watchdog(); | ||
149 | return; | ||
150 | } | ||
151 | |||
152 | now = get_timestamp(this_cpu); | ||
153 | |||
154 | /* | ||
155 | * Wake up the high-prio watchdog task twice per | ||
156 | * threshold timespan. | ||
157 | */ | ||
158 | if (time_after(now - softlockup_thresh/2, touch_ts)) | ||
159 | wake_up_process(per_cpu(softlockup_watchdog, this_cpu)); | ||
160 | |||
161 | /* Warn about unreasonable delays: */ | ||
162 | if (time_before_eq(now - softlockup_thresh, touch_ts)) | ||
163 | return; | ||
164 | |||
165 | per_cpu(softlockup_print_ts, this_cpu) = touch_ts; | ||
166 | |||
167 | spin_lock(&print_lock); | ||
168 | printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n", | ||
169 | this_cpu, now - touch_ts, | ||
170 | current->comm, task_pid_nr(current)); | ||
171 | print_modules(); | ||
172 | print_irqtrace_events(current); | ||
173 | if (regs) | ||
174 | show_regs(regs); | ||
175 | else | ||
176 | dump_stack(); | ||
177 | spin_unlock(&print_lock); | ||
178 | |||
179 | if (softlockup_panic) | ||
180 | panic("softlockup: hung tasks"); | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * The watchdog thread - runs every second and touches the timestamp. | ||
185 | */ | ||
186 | static int watchdog(void *__bind_cpu) | ||
187 | { | ||
188 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | ||
189 | |||
190 | sched_setscheduler(current, SCHED_FIFO, ¶m); | ||
191 | |||
192 | /* initialize timestamp */ | ||
193 | __touch_softlockup_watchdog(); | ||
194 | |||
195 | set_current_state(TASK_INTERRUPTIBLE); | ||
196 | /* | ||
197 | * Run briefly once per second to reset the softlockup timestamp. | ||
198 | * If this gets delayed for more than 60 seconds then the | ||
199 | * debug-printout triggers in softlockup_tick(). | ||
200 | */ | ||
201 | while (!kthread_should_stop()) { | ||
202 | __touch_softlockup_watchdog(); | ||
203 | schedule(); | ||
204 | |||
205 | if (kthread_should_stop()) | ||
206 | break; | ||
207 | |||
208 | set_current_state(TASK_INTERRUPTIBLE); | ||
209 | } | ||
210 | __set_current_state(TASK_RUNNING); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | /* | ||
216 | * Create/destroy watchdog threads as CPUs come and go: | ||
217 | */ | ||
218 | static int __cpuinit | ||
219 | cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
220 | { | ||
221 | int hotcpu = (unsigned long)hcpu; | ||
222 | struct task_struct *p; | ||
223 | |||
224 | switch (action) { | ||
225 | case CPU_UP_PREPARE: | ||
226 | case CPU_UP_PREPARE_FROZEN: | ||
227 | BUG_ON(per_cpu(softlockup_watchdog, hotcpu)); | ||
228 | p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu); | ||
229 | if (IS_ERR(p)) { | ||
230 | printk(KERN_ERR "watchdog for %i failed\n", hotcpu); | ||
231 | return NOTIFY_BAD; | ||
232 | } | ||
233 | per_cpu(softlockup_touch_ts, hotcpu) = 0; | ||
234 | per_cpu(softlockup_watchdog, hotcpu) = p; | ||
235 | kthread_bind(p, hotcpu); | ||
236 | break; | ||
237 | case CPU_ONLINE: | ||
238 | case CPU_ONLINE_FROZEN: | ||
239 | wake_up_process(per_cpu(softlockup_watchdog, hotcpu)); | ||
240 | break; | ||
241 | #ifdef CONFIG_HOTPLUG_CPU | ||
242 | case CPU_UP_CANCELED: | ||
243 | case CPU_UP_CANCELED_FROZEN: | ||
244 | if (!per_cpu(softlockup_watchdog, hotcpu)) | ||
245 | break; | ||
246 | /* Unbind so it can run. Fall thru. */ | ||
247 | kthread_bind(per_cpu(softlockup_watchdog, hotcpu), | ||
248 | cpumask_any(cpu_online_mask)); | ||
249 | case CPU_DEAD: | ||
250 | case CPU_DEAD_FROZEN: | ||
251 | p = per_cpu(softlockup_watchdog, hotcpu); | ||
252 | per_cpu(softlockup_watchdog, hotcpu) = NULL; | ||
253 | kthread_stop(p); | ||
254 | break; | ||
255 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
256 | } | ||
257 | return NOTIFY_OK; | ||
258 | } | ||
259 | |||
260 | static struct notifier_block __cpuinitdata cpu_nfb = { | ||
261 | .notifier_call = cpu_callback | ||
262 | }; | ||
263 | |||
264 | static int __initdata nosoftlockup; | ||
265 | |||
266 | static int __init nosoftlockup_setup(char *str) | ||
267 | { | ||
268 | nosoftlockup = 1; | ||
269 | return 1; | ||
270 | } | ||
271 | __setup("nosoftlockup", nosoftlockup_setup); | ||
272 | |||
273 | static int __init spawn_softlockup_task(void) | ||
274 | { | ||
275 | void *cpu = (void *)(long)smp_processor_id(); | ||
276 | int err; | ||
277 | |||
278 | if (nosoftlockup) | ||
279 | return 0; | ||
280 | |||
281 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | ||
282 | if (err == NOTIFY_BAD) { | ||
283 | BUG(); | ||
284 | return 1; | ||
285 | } | ||
286 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | ||
287 | register_cpu_notifier(&cpu_nfb); | ||
288 | |||
289 | atomic_notifier_chain_register(&panic_notifier_list, &panic_block); | ||
290 | |||
291 | return 0; | ||
292 | } | ||
293 | early_initcall(spawn_softlockup_task); | ||
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index d24f761f4876..6f79c7f81c96 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -76,6 +76,10 @@ | |||
76 | #include <scsi/sg.h> | 76 | #include <scsi/sg.h> |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | #ifdef CONFIG_LOCKUP_DETECTOR | ||
80 | #include <linux/nmi.h> | ||
81 | #endif | ||
82 | |||
79 | 83 | ||
80 | #if defined(CONFIG_SYSCTL) | 84 | #if defined(CONFIG_SYSCTL) |
81 | 85 | ||
@@ -106,7 +110,7 @@ extern int blk_iopoll_enabled; | |||
106 | #endif | 110 | #endif |
107 | 111 | ||
108 | /* Constants used for minimum and maximum */ | 112 | /* Constants used for minimum and maximum */ |
109 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 113 | #ifdef CONFIG_LOCKUP_DETECTOR |
110 | static int sixty = 60; | 114 | static int sixty = 60; |
111 | static int neg_one = -1; | 115 | static int neg_one = -1; |
112 | #endif | 116 | #endif |
@@ -710,7 +714,34 @@ static struct ctl_table kern_table[] = { | |||
710 | .mode = 0444, | 714 | .mode = 0444, |
711 | .proc_handler = proc_dointvec, | 715 | .proc_handler = proc_dointvec, |
712 | }, | 716 | }, |
713 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) | 717 | #if defined(CONFIG_LOCKUP_DETECTOR) |
718 | { | ||
719 | .procname = "watchdog", | ||
720 | .data = &watchdog_enabled, | ||
721 | .maxlen = sizeof (int), | ||
722 | .mode = 0644, | ||
723 | .proc_handler = proc_dowatchdog_enabled, | ||
724 | }, | ||
725 | { | ||
726 | .procname = "watchdog_thresh", | ||
727 | .data = &softlockup_thresh, | ||
728 | .maxlen = sizeof(int), | ||
729 | .mode = 0644, | ||
730 | .proc_handler = proc_dowatchdog_thresh, | ||
731 | .extra1 = &neg_one, | ||
732 | .extra2 = &sixty, | ||
733 | }, | ||
734 | { | ||
735 | .procname = "softlockup_panic", | ||
736 | .data = &softlockup_panic, | ||
737 | .maxlen = sizeof(int), | ||
738 | .mode = 0644, | ||
739 | .proc_handler = proc_dointvec_minmax, | ||
740 | .extra1 = &zero, | ||
741 | .extra2 = &one, | ||
742 | }, | ||
743 | #endif | ||
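
The entries added above appear under /proc/sys/kernel/ on kernels built with CONFIG_LOCKUP_DETECTOR. A small user-space sketch that reads one of them (file name taken from the procname above; the file simply will not exist on kernels without the detector):

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/kernel/watchdog_thresh", "r");
            int thresh;

            if (!f) {
                    perror("watchdog_thresh");      /* detector not built in? */
                    return 1;
            }
            if (fscanf(f, "%d", &thresh) == 1)
                    printf("soft-lockup threshold: %d seconds\n", thresh);
            fclose(f);
            return 0;
    }
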
744 | #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) && !defined(CONFIG_LOCKUP_DETECTOR) | ||
714 | { | 745 | { |
715 | .procname = "unknown_nmi_panic", | 746 | .procname = "unknown_nmi_panic", |
716 | .data = &unknown_nmi_panic, | 747 | .data = &unknown_nmi_panic, |
@@ -813,26 +844,6 @@ static struct ctl_table kern_table[] = { | |||
813 | .proc_handler = proc_dointvec, | 844 | .proc_handler = proc_dointvec, |
814 | }, | 845 | }, |
815 | #endif | 846 | #endif |
816 | #ifdef CONFIG_DETECT_SOFTLOCKUP | ||
817 | { | ||
818 | .procname = "softlockup_panic", | ||
819 | .data = &softlockup_panic, | ||
820 | .maxlen = sizeof(int), | ||
821 | .mode = 0644, | ||
822 | .proc_handler = proc_dointvec_minmax, | ||
823 | .extra1 = &zero, | ||
824 | .extra2 = &one, | ||
825 | }, | ||
826 | { | ||
827 | .procname = "softlockup_thresh", | ||
828 | .data = &softlockup_thresh, | ||
829 | .maxlen = sizeof(int), | ||
830 | .mode = 0644, | ||
831 | .proc_handler = proc_dosoftlockup_thresh, | ||
832 | .extra1 = &neg_one, | ||
833 | .extra2 = &sixty, | ||
834 | }, | ||
835 | #endif | ||
836 | #ifdef CONFIG_DETECT_HUNG_TASK | 847 | #ifdef CONFIG_DETECT_HUNG_TASK |
837 | { | 848 | { |
838 | .procname = "hung_task_panic", | 849 | .procname = "hung_task_panic", |
diff --git a/kernel/timer.c b/kernel/timer.c index efde11e197c4..6aa6f7e69ad5 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1302,7 +1302,6 @@ void run_local_timers(void) | |||
1302 | { | 1302 | { |
1303 | hrtimer_run_queues(); | 1303 | hrtimer_run_queues(); |
1304 | raise_softirq(TIMER_SOFTIRQ); | 1304 | raise_softirq(TIMER_SOFTIRQ); |
1305 | softlockup_tick(); | ||
1306 | } | 1305 | } |
1307 | 1306 | ||
1308 | /* | 1307 | /* |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 8b1797c4545b..c7683fd8a03a 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -194,15 +194,6 @@ config PREEMPT_TRACER | |||
194 | enabled. This option and the irqs-off timing option can be | 194 | enabled. This option and the irqs-off timing option can be |
195 | used together or separately.) | 195 | used together or separately.) |
196 | 196 | ||
197 | config SYSPROF_TRACER | ||
198 | bool "Sysprof Tracer" | ||
199 | depends on X86 | ||
200 | select GENERIC_TRACER | ||
201 | select CONTEXT_SWITCH_TRACER | ||
202 | help | ||
203 | This tracer provides the trace needed by the 'Sysprof' userspace | ||
204 | tool. | ||
205 | |||
206 | config SCHED_TRACER | 197 | config SCHED_TRACER |
207 | bool "Scheduling Latency Tracer" | 198 | bool "Scheduling Latency Tracer" |
208 | select GENERIC_TRACER | 199 | select GENERIC_TRACER |
@@ -229,23 +220,6 @@ config FTRACE_SYSCALLS | |||
229 | help | 220 | help |
230 | Basic tracer to catch the syscall entry and exit events. | 221 | Basic tracer to catch the syscall entry and exit events. |
231 | 222 | ||
232 | config BOOT_TRACER | ||
233 | bool "Trace boot initcalls" | ||
234 | select GENERIC_TRACER | ||
235 | select CONTEXT_SWITCH_TRACER | ||
236 | help | ||
237 | This tracer helps developers to optimize boot times: it records | ||
238 | the timings of the initcalls and traces key events and the identity | ||
239 | of tasks that can cause boot delays, such as context-switches. | ||
240 | |||
241 | Its aim is to be parsed by the scripts/bootgraph.pl tool to | ||
242 | produce pretty graphics about boot inefficiencies, giving a visual | ||
243 | representation of the delays during initcalls - but the raw | ||
244 | /debug/tracing/trace text output is readable too. | ||
245 | |||
246 | You must pass in initcall_debug and ftrace=initcall to the kernel | ||
247 | command line to enable this on bootup. | ||
248 | |||
249 | config TRACE_BRANCH_PROFILING | 223 | config TRACE_BRANCH_PROFILING |
250 | bool | 224 | bool |
251 | select GENERIC_TRACER | 225 | select GENERIC_TRACER |
@@ -325,28 +299,6 @@ config BRANCH_TRACER | |||
325 | 299 | ||
326 | Say N if unsure. | 300 | Say N if unsure. |
327 | 301 | ||
328 | config KSYM_TRACER | ||
329 | bool "Trace read and write access on kernel memory locations" | ||
330 | depends on HAVE_HW_BREAKPOINT | ||
331 | select TRACING | ||
332 | help | ||
333 | This tracer helps find read and write operations on any given kernel | ||
334 | symbol i.e. /proc/kallsyms. | ||
335 | |||
336 | config PROFILE_KSYM_TRACER | ||
337 | bool "Profile all kernel memory accesses on 'watched' variables" | ||
338 | depends on KSYM_TRACER | ||
339 | help | ||
340 | This tracer profiles kernel accesses on variables watched through the | ||
341 | ksym tracer ftrace plugin. Depending upon the hardware, all read | ||
342 | and write operations on kernel variables can be monitored for | ||
343 | accesses. | ||
344 | |||
345 | The results will be displayed in: | ||
346 | /debugfs/tracing/profile_ksym | ||
347 | |||
348 | Say N if unsure. | ||
349 | |||
350 | config STACK_TRACER | 302 | config STACK_TRACER |
351 | bool "Trace max stack" | 303 | bool "Trace max stack" |
352 | depends on HAVE_FUNCTION_TRACER | 304 | depends on HAVE_FUNCTION_TRACER |
@@ -371,26 +323,6 @@ config STACK_TRACER | |||
371 | 323 | ||
372 | Say N if unsure. | 324 | Say N if unsure. |
373 | 325 | ||
374 | config KMEMTRACE | ||
375 | bool "Trace SLAB allocations" | ||
376 | select GENERIC_TRACER | ||
377 | help | ||
378 | kmemtrace provides tracing for slab allocator functions, such as | ||
379 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected | ||
380 | data is then fed to the userspace application in order to analyse | ||
381 | allocation hotspots, internal fragmentation and so on, making it | ||
382 | possible to see how well an allocator performs, as well as debug | ||
383 | and profile kernel code. | ||
384 | |||
385 | This requires a userspace application to use. See | ||
386 | Documentation/trace/kmemtrace.txt for more information. | ||
387 | |||
388 | Saying Y will make the kernel somewhat larger and slower. However, | ||
389 | if you disable kmemtrace at run-time or boot-time, the performance | ||
390 | impact is minimal (depending on the arch the kernel is built for). | ||
391 | |||
392 | If unsure, say N. | ||
393 | |||
394 | config WORKQUEUE_TRACER | 326 | config WORKQUEUE_TRACER |
395 | bool "Trace workqueues" | 327 | bool "Trace workqueues" |
396 | select GENERIC_TRACER | 328 | select GENERIC_TRACER |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 4215530b490b..53f338190b26 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -30,7 +30,6 @@ obj-$(CONFIG_TRACING) += trace_output.o | |||
30 | obj-$(CONFIG_TRACING) += trace_stat.o | 30 | obj-$(CONFIG_TRACING) += trace_stat.o |
31 | obj-$(CONFIG_TRACING) += trace_printk.o | 31 | obj-$(CONFIG_TRACING) += trace_printk.o |
32 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o | 32 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o |
33 | obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o | ||
34 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o | 33 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o |
35 | obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o | 34 | obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o |
36 | obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o | 35 | obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o |
@@ -38,10 +37,8 @@ obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o | |||
38 | obj-$(CONFIG_NOP_TRACER) += trace_nop.o | 37 | obj-$(CONFIG_NOP_TRACER) += trace_nop.o |
39 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o | 38 | obj-$(CONFIG_STACK_TRACER) += trace_stack.o |
40 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o | 39 | obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o |
41 | obj-$(CONFIG_BOOT_TRACER) += trace_boot.o | ||
42 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o | 40 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o |
43 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | 41 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o |
44 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | ||
45 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o | 42 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o |
46 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o | 43 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o |
47 | ifeq ($(CONFIG_BLOCK),y) | 44 | ifeq ($(CONFIG_BLOCK),y) |
@@ -55,7 +52,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o | |||
55 | endif | 52 | endif |
56 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 53 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
57 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o | 54 | obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o |
58 | obj-$(CONFIG_KSYM_TRACER) += trace_ksym.o | ||
59 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o | 55 | obj-$(CONFIG_EVENT_TRACING) += power-traces.o |
60 | ifeq ($(CONFIG_TRACING),y) | 56 | ifeq ($(CONFIG_TRACING),y) |
61 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o | 57 | obj-$(CONFIG_KGDB_KDB) += trace_kdb.o |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 6d2cb14f9449..0d88ce9b9fb8 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1883,7 +1883,6 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | |||
1883 | struct hlist_head *hhd; | 1883 | struct hlist_head *hhd; |
1884 | struct hlist_node *n; | 1884 | struct hlist_node *n; |
1885 | unsigned long key; | 1885 | unsigned long key; |
1886 | int resched; | ||
1887 | 1886 | ||
1888 | key = hash_long(ip, FTRACE_HASH_BITS); | 1887 | key = hash_long(ip, FTRACE_HASH_BITS); |
1889 | 1888 | ||
@@ -1897,12 +1896,12 @@ function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | |||
1897 | * period. This syncs the hash iteration and freeing of items | 1896 | * period. This syncs the hash iteration and freeing of items |
1898 | * on the hash. rcu_read_lock is too dangerous here. | 1897 | * on the hash. rcu_read_lock is too dangerous here. |
1899 | */ | 1898 | */ |
1900 | resched = ftrace_preempt_disable(); | 1899 | preempt_disable_notrace(); |
1901 | hlist_for_each_entry_rcu(entry, n, hhd, node) { | 1900 | hlist_for_each_entry_rcu(entry, n, hhd, node) { |
1902 | if (entry->ip == ip) | 1901 | if (entry->ip == ip) |
1903 | entry->ops->func(ip, parent_ip, &entry->data); | 1902 | entry->ops->func(ip, parent_ip, &entry->data); |
1904 | } | 1903 | } |
1905 | ftrace_preempt_enable(resched); | 1904 | preempt_enable_notrace(); |
1906 | } | 1905 | } |
1907 | 1906 | ||
1908 | static struct ftrace_ops trace_probe_ops __read_mostly = | 1907 | static struct ftrace_ops trace_probe_ops __read_mostly = |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c deleted file mode 100644 index bbfc1bb1660b..000000000000 --- a/kernel/trace/kmemtrace.c +++ /dev/null | |||
@@ -1,529 +0,0 @@ | |||
1 | /* | ||
2 | * Memory allocator tracing | ||
3 | * | ||
4 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
5 | * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi> | ||
6 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/tracepoint.h> | ||
10 | #include <linux/seq_file.h> | ||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/dcache.h> | ||
13 | #include <linux/fs.h> | ||
14 | |||
15 | #include <linux/kmemtrace.h> | ||
16 | |||
17 | #include "trace_output.h" | ||
18 | #include "trace.h" | ||
19 | |||
20 | /* Select an alternative, minimalistic output than the original one */ | ||
21 | #define TRACE_KMEM_OPT_MINIMAL 0x1 | ||
22 | |||
23 | static struct tracer_opt kmem_opts[] = { | ||
24 | /* Default disable the minimalistic output */ | ||
25 | { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) }, | ||
26 | { } | ||
27 | }; | ||
28 | |||
29 | static struct tracer_flags kmem_tracer_flags = { | ||
30 | .val = 0, | ||
31 | .opts = kmem_opts | ||
32 | }; | ||
33 | |||
34 | static struct trace_array *kmemtrace_array; | ||
35 | |||
36 | /* Trace allocations */ | ||
37 | static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id, | ||
38 | unsigned long call_site, | ||
39 | const void *ptr, | ||
40 | size_t bytes_req, | ||
41 | size_t bytes_alloc, | ||
42 | gfp_t gfp_flags, | ||
43 | int node) | ||
44 | { | ||
45 | struct ftrace_event_call *call = &event_kmem_alloc; | ||
46 | struct trace_array *tr = kmemtrace_array; | ||
47 | struct kmemtrace_alloc_entry *entry; | ||
48 | struct ring_buffer_event *event; | ||
49 | |||
50 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); | ||
51 | if (!event) | ||
52 | return; | ||
53 | |||
54 | entry = ring_buffer_event_data(event); | ||
55 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
56 | |||
57 | entry->ent.type = TRACE_KMEM_ALLOC; | ||
58 | entry->type_id = type_id; | ||
59 | entry->call_site = call_site; | ||
60 | entry->ptr = ptr; | ||
61 | entry->bytes_req = bytes_req; | ||
62 | entry->bytes_alloc = bytes_alloc; | ||
63 | entry->gfp_flags = gfp_flags; | ||
64 | entry->node = node; | ||
65 | |||
66 | if (!filter_check_discard(call, entry, tr->buffer, event)) | ||
67 | ring_buffer_unlock_commit(tr->buffer, event); | ||
68 | |||
69 | trace_wake_up(); | ||
70 | } | ||
71 | |||
72 | static inline void kmemtrace_free(enum kmemtrace_type_id type_id, | ||
73 | unsigned long call_site, | ||
74 | const void *ptr) | ||
75 | { | ||
76 | struct ftrace_event_call *call = &event_kmem_free; | ||
77 | struct trace_array *tr = kmemtrace_array; | ||
78 | struct kmemtrace_free_entry *entry; | ||
79 | struct ring_buffer_event *event; | ||
80 | |||
81 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry)); | ||
82 | if (!event) | ||
83 | return; | ||
84 | entry = ring_buffer_event_data(event); | ||
85 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
86 | |||
87 | entry->ent.type = TRACE_KMEM_FREE; | ||
88 | entry->type_id = type_id; | ||
89 | entry->call_site = call_site; | ||
90 | entry->ptr = ptr; | ||
91 | |||
92 | if (!filter_check_discard(call, entry, tr->buffer, event)) | ||
93 | ring_buffer_unlock_commit(tr->buffer, event); | ||
94 | |||
95 | trace_wake_up(); | ||
96 | } | ||
97 | |||
98 | static void kmemtrace_kmalloc(void *ignore, | ||
99 | unsigned long call_site, | ||
100 | const void *ptr, | ||
101 | size_t bytes_req, | ||
102 | size_t bytes_alloc, | ||
103 | gfp_t gfp_flags) | ||
104 | { | ||
105 | kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, | ||
106 | bytes_req, bytes_alloc, gfp_flags, -1); | ||
107 | } | ||
108 | |||
109 | static void kmemtrace_kmem_cache_alloc(void *ignore, | ||
110 | unsigned long call_site, | ||
111 | const void *ptr, | ||
112 | size_t bytes_req, | ||
113 | size_t bytes_alloc, | ||
114 | gfp_t gfp_flags) | ||
115 | { | ||
116 | kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, | ||
117 | bytes_req, bytes_alloc, gfp_flags, -1); | ||
118 | } | ||
119 | |||
120 | static void kmemtrace_kmalloc_node(void *ignore, | ||
121 | unsigned long call_site, | ||
122 | const void *ptr, | ||
123 | size_t bytes_req, | ||
124 | size_t bytes_alloc, | ||
125 | gfp_t gfp_flags, | ||
126 | int node) | ||
127 | { | ||
128 | kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr, | ||
129 | bytes_req, bytes_alloc, gfp_flags, node); | ||
130 | } | ||
131 | |||
132 | static void kmemtrace_kmem_cache_alloc_node(void *ignore, | ||
133 | unsigned long call_site, | ||
134 | const void *ptr, | ||
135 | size_t bytes_req, | ||
136 | size_t bytes_alloc, | ||
137 | gfp_t gfp_flags, | ||
138 | int node) | ||
139 | { | ||
140 | kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr, | ||
141 | bytes_req, bytes_alloc, gfp_flags, node); | ||
142 | } | ||
143 | |||
144 | static void | ||
145 | kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr) | ||
146 | { | ||
147 | kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr); | ||
148 | } | ||
149 | |||
150 | static void kmemtrace_kmem_cache_free(void *ignore, | ||
151 | unsigned long call_site, const void *ptr) | ||
152 | { | ||
153 | kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr); | ||
154 | } | ||
155 | |||
156 | static int kmemtrace_start_probes(void) | ||
157 | { | ||
158 | int err; | ||
159 | |||
160 | err = register_trace_kmalloc(kmemtrace_kmalloc, NULL); | ||
161 | if (err) | ||
162 | return err; | ||
163 | err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); | ||
164 | if (err) | ||
165 | return err; | ||
166 | err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); | ||
167 | if (err) | ||
168 | return err; | ||
169 | err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); | ||
170 | if (err) | ||
171 | return err; | ||
172 | err = register_trace_kfree(kmemtrace_kfree, NULL); | ||
173 | if (err) | ||
174 | return err; | ||
175 | err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); | ||
176 | |||
177 | return err; | ||
178 | } | ||
179 | |||
180 | static void kmemtrace_stop_probes(void) | ||
181 | { | ||
182 | unregister_trace_kmalloc(kmemtrace_kmalloc, NULL); | ||
183 | unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL); | ||
184 | unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL); | ||
185 | unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL); | ||
186 | unregister_trace_kfree(kmemtrace_kfree, NULL); | ||
187 | unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL); | ||
188 | } | ||
189 | |||
190 | static int kmem_trace_init(struct trace_array *tr) | ||
191 | { | ||
192 | kmemtrace_array = tr; | ||
193 | |||
194 | tracing_reset_online_cpus(tr); | ||
195 | |||
196 | kmemtrace_start_probes(); | ||
197 | |||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static void kmem_trace_reset(struct trace_array *tr) | ||
202 | { | ||
203 | kmemtrace_stop_probes(); | ||
204 | } | ||
205 | |||
206 | static void kmemtrace_headers(struct seq_file *s) | ||
207 | { | ||
208 | /* Don't need headers for the original kmemtrace output */ | ||
209 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
210 | return; | ||
211 | |||
212 | seq_printf(s, "#\n"); | ||
213 | seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " | ||
214 | " POINTER NODE CALLER\n"); | ||
215 | seq_printf(s, "# FREE | | | | " | ||
216 | " | | | |\n"); | ||
217 | seq_printf(s, "# |\n\n"); | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * The following functions give the original output from kmemtrace, | ||
222 | * plus the origin CPU, since reordering occurs in-kernel now. | ||
223 | */ | ||
224 | |||
225 | #define KMEMTRACE_USER_ALLOC 0 | ||
226 | #define KMEMTRACE_USER_FREE 1 | ||
227 | |||
228 | struct kmemtrace_user_event { | ||
229 | u8 event_id; | ||
230 | u8 type_id; | ||
231 | u16 event_size; | ||
232 | u32 cpu; | ||
233 | u64 timestamp; | ||
234 | unsigned long call_site; | ||
235 | unsigned long ptr; | ||
236 | }; | ||
237 | |||
238 | struct kmemtrace_user_event_alloc { | ||
239 | size_t bytes_req; | ||
240 | size_t bytes_alloc; | ||
241 | unsigned gfp_flags; | ||
242 | int node; | ||
243 | }; | ||
244 | |||
245 | static enum print_line_t | ||
246 | kmemtrace_print_alloc(struct trace_iterator *iter, int flags, | ||
247 | struct trace_event *event) | ||
248 | { | ||
249 | struct trace_seq *s = &iter->seq; | ||
250 | struct kmemtrace_alloc_entry *entry; | ||
251 | int ret; | ||
252 | |||
253 | trace_assign_type(entry, iter->ent); | ||
254 | |||
255 | ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu " | ||
256 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", | ||
257 | entry->type_id, (void *)entry->call_site, (unsigned long)entry->ptr, | ||
258 | (unsigned long)entry->bytes_req, (unsigned long)entry->bytes_alloc, | ||
259 | (unsigned long)entry->gfp_flags, entry->node); | ||
260 | |||
261 | if (!ret) | ||
262 | return TRACE_TYPE_PARTIAL_LINE; | ||
263 | return TRACE_TYPE_HANDLED; | ||
264 | } | ||
265 | |||
266 | static enum print_line_t | ||
267 | kmemtrace_print_free(struct trace_iterator *iter, int flags, | ||
268 | struct trace_event *event) | ||
269 | { | ||
270 | struct trace_seq *s = &iter->seq; | ||
271 | struct kmemtrace_free_entry *entry; | ||
272 | int ret; | ||
273 | |||
274 | trace_assign_type(entry, iter->ent); | ||
275 | |||
276 | ret = trace_seq_printf(s, "type_id %d call_site %pF ptr %lu\n", | ||
277 | entry->type_id, (void *)entry->call_site, | ||
278 | (unsigned long)entry->ptr); | ||
279 | |||
280 | if (!ret) | ||
281 | return TRACE_TYPE_PARTIAL_LINE; | ||
282 | return TRACE_TYPE_HANDLED; | ||
283 | } | ||
284 | |||
285 | static enum print_line_t | ||
286 | kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags, | ||
287 | struct trace_event *event) | ||
288 | { | ||
289 | struct trace_seq *s = &iter->seq; | ||
290 | struct kmemtrace_alloc_entry *entry; | ||
291 | struct kmemtrace_user_event *ev; | ||
292 | struct kmemtrace_user_event_alloc *ev_alloc; | ||
293 | |||
294 | trace_assign_type(entry, iter->ent); | ||
295 | |||
296 | ev = trace_seq_reserve(s, sizeof(*ev)); | ||
297 | if (!ev) | ||
298 | return TRACE_TYPE_PARTIAL_LINE; | ||
299 | |||
300 | ev->event_id = KMEMTRACE_USER_ALLOC; | ||
301 | ev->type_id = entry->type_id; | ||
302 | ev->event_size = sizeof(*ev) + sizeof(*ev_alloc); | ||
303 | ev->cpu = iter->cpu; | ||
304 | ev->timestamp = iter->ts; | ||
305 | ev->call_site = entry->call_site; | ||
306 | ev->ptr = (unsigned long)entry->ptr; | ||
307 | |||
308 | ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc)); | ||
309 | if (!ev_alloc) | ||
310 | return TRACE_TYPE_PARTIAL_LINE; | ||
311 | |||
312 | ev_alloc->bytes_req = entry->bytes_req; | ||
313 | ev_alloc->bytes_alloc = entry->bytes_alloc; | ||
314 | ev_alloc->gfp_flags = entry->gfp_flags; | ||
315 | ev_alloc->node = entry->node; | ||
316 | |||
317 | return TRACE_TYPE_HANDLED; | ||
318 | } | ||
319 | |||
320 | static enum print_line_t | ||
321 | kmemtrace_print_free_user(struct trace_iterator *iter, int flags, | ||
322 | struct trace_event *event) | ||
323 | { | ||
324 | struct trace_seq *s = &iter->seq; | ||
325 | struct kmemtrace_free_entry *entry; | ||
326 | struct kmemtrace_user_event *ev; | ||
327 | |||
328 | trace_assign_type(entry, iter->ent); | ||
329 | |||
330 | ev = trace_seq_reserve(s, sizeof(*ev)); | ||
331 | if (!ev) | ||
332 | return TRACE_TYPE_PARTIAL_LINE; | ||
333 | |||
334 | ev->event_id = KMEMTRACE_USER_FREE; | ||
335 | ev->type_id = entry->type_id; | ||
336 | ev->event_size = sizeof(*ev); | ||
337 | ev->cpu = iter->cpu; | ||
338 | ev->timestamp = iter->ts; | ||
339 | ev->call_site = entry->call_site; | ||
340 | ev->ptr = (unsigned long)entry->ptr; | ||
341 | |||
342 | return TRACE_TYPE_HANDLED; | ||
343 | } | ||
344 | |||
345 | /* The two other following provide a more minimalistic output */ | ||
346 | static enum print_line_t | ||
347 | kmemtrace_print_alloc_compress(struct trace_iterator *iter) | ||
348 | { | ||
349 | struct kmemtrace_alloc_entry *entry; | ||
350 | struct trace_seq *s = &iter->seq; | ||
351 | int ret; | ||
352 | |||
353 | trace_assign_type(entry, iter->ent); | ||
354 | |||
355 | /* Alloc entry */ | ||
356 | ret = trace_seq_printf(s, " + "); | ||
357 | if (!ret) | ||
358 | return TRACE_TYPE_PARTIAL_LINE; | ||
359 | |||
360 | /* Type */ | ||
361 | switch (entry->type_id) { | ||
362 | case KMEMTRACE_TYPE_KMALLOC: | ||
363 | ret = trace_seq_printf(s, "K "); | ||
364 | break; | ||
365 | case KMEMTRACE_TYPE_CACHE: | ||
366 | ret = trace_seq_printf(s, "C "); | ||
367 | break; | ||
368 | case KMEMTRACE_TYPE_PAGES: | ||
369 | ret = trace_seq_printf(s, "P "); | ||
370 | break; | ||
371 | default: | ||
372 | ret = trace_seq_printf(s, "? "); | ||
373 | } | ||
374 | |||
375 | if (!ret) | ||
376 | return TRACE_TYPE_PARTIAL_LINE; | ||
377 | |||
378 | /* Requested */ | ||
379 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_req); | ||
380 | if (!ret) | ||
381 | return TRACE_TYPE_PARTIAL_LINE; | ||
382 | |||
383 | /* Allocated */ | ||
384 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc); | ||
385 | if (!ret) | ||
386 | return TRACE_TYPE_PARTIAL_LINE; | ||
387 | |||
388 | /* Flags | ||
389 | * TODO: would be better to see the names of the GFP flags | ||
390 | */ | ||
391 | ret = trace_seq_printf(s, "%08x ", entry->gfp_flags); | ||
392 | if (!ret) | ||
393 | return TRACE_TYPE_PARTIAL_LINE; | ||
394 | |||
395 | /* Pointer to allocated */ | ||
396 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
397 | if (!ret) | ||
398 | return TRACE_TYPE_PARTIAL_LINE; | ||
399 | |||
400 | /* Node and call site*/ | ||
401 | ret = trace_seq_printf(s, "%4d %pf\n", entry->node, | ||
402 | (void *)entry->call_site); | ||
403 | if (!ret) | ||
404 | return TRACE_TYPE_PARTIAL_LINE; | ||
405 | |||
406 | return TRACE_TYPE_HANDLED; | ||
407 | } | ||
408 | |||
409 | static enum print_line_t | ||
410 | kmemtrace_print_free_compress(struct trace_iterator *iter) | ||
411 | { | ||
412 | struct kmemtrace_free_entry *entry; | ||
413 | struct trace_seq *s = &iter->seq; | ||
414 | int ret; | ||
415 | |||
416 | trace_assign_type(entry, iter->ent); | ||
417 | |||
418 | /* Free entry */ | ||
419 | ret = trace_seq_printf(s, " - "); | ||
420 | if (!ret) | ||
421 | return TRACE_TYPE_PARTIAL_LINE; | ||
422 | |||
423 | /* Type */ | ||
424 | switch (entry->type_id) { | ||
425 | case KMEMTRACE_TYPE_KMALLOC: | ||
426 | ret = trace_seq_printf(s, "K "); | ||
427 | break; | ||
428 | case KMEMTRACE_TYPE_CACHE: | ||
429 | ret = trace_seq_printf(s, "C "); | ||
430 | break; | ||
431 | case KMEMTRACE_TYPE_PAGES: | ||
432 | ret = trace_seq_printf(s, "P "); | ||
433 | break; | ||
434 | default: | ||
435 | ret = trace_seq_printf(s, "? "); | ||
436 | } | ||
437 | |||
438 | if (!ret) | ||
439 | return TRACE_TYPE_PARTIAL_LINE; | ||
440 | |||
441 | /* Skip requested/allocated/flags */ | ||
442 | ret = trace_seq_printf(s, " "); | ||
443 | if (!ret) | ||
444 | return TRACE_TYPE_PARTIAL_LINE; | ||
445 | |||
446 | /* Pointer to allocated */ | ||
447 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
448 | if (!ret) | ||
449 | return TRACE_TYPE_PARTIAL_LINE; | ||
450 | |||
451 | /* Skip node and print call site*/ | ||
452 | ret = trace_seq_printf(s, " %pf\n", (void *)entry->call_site); | ||
453 | if (!ret) | ||
454 | return TRACE_TYPE_PARTIAL_LINE; | ||
455 | |||
456 | return TRACE_TYPE_HANDLED; | ||
457 | } | ||
458 | |||
459 | static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | ||
460 | { | ||
461 | struct trace_entry *entry = iter->ent; | ||
462 | |||
463 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
464 | return TRACE_TYPE_UNHANDLED; | ||
465 | |||
466 | switch (entry->type) { | ||
467 | case TRACE_KMEM_ALLOC: | ||
468 | return kmemtrace_print_alloc_compress(iter); | ||
469 | case TRACE_KMEM_FREE: | ||
470 | return kmemtrace_print_free_compress(iter); | ||
471 | default: | ||
472 | return TRACE_TYPE_UNHANDLED; | ||
473 | } | ||
474 | } | ||
475 | |||
476 | static struct trace_event_functions kmem_trace_alloc_funcs = { | ||
477 | .trace = kmemtrace_print_alloc, | ||
478 | .binary = kmemtrace_print_alloc_user, | ||
479 | }; | ||
480 | |||
481 | static struct trace_event kmem_trace_alloc = { | ||
482 | .type = TRACE_KMEM_ALLOC, | ||
483 | .funcs = &kmem_trace_alloc_funcs, | ||
484 | }; | ||
485 | |||
486 | static struct trace_event_functions kmem_trace_free_funcs = { | ||
487 | .trace = kmemtrace_print_free, | ||
488 | .binary = kmemtrace_print_free_user, | ||
489 | }; | ||
490 | |||
491 | static struct trace_event kmem_trace_free = { | ||
492 | .type = TRACE_KMEM_FREE, | ||
493 | .funcs = &kmem_trace_free_funcs, | ||
494 | }; | ||
495 | |||
496 | static struct tracer kmem_tracer __read_mostly = { | ||
497 | .name = "kmemtrace", | ||
498 | .init = kmem_trace_init, | ||
499 | .reset = kmem_trace_reset, | ||
500 | .print_line = kmemtrace_print_line, | ||
501 | .print_header = kmemtrace_headers, | ||
502 | .flags = &kmem_tracer_flags | ||
503 | }; | ||
504 | |||
505 | void kmemtrace_init(void) | ||
506 | { | ||
507 | /* earliest opportunity to start kmem tracing */ | ||
508 | } | ||
509 | |||
510 | static int __init init_kmem_tracer(void) | ||
511 | { | ||
512 | if (!register_ftrace_event(&kmem_trace_alloc)) { | ||
513 | pr_warning("Warning: could not register kmem events\n"); | ||
514 | return 1; | ||
515 | } | ||
516 | |||
517 | if (!register_ftrace_event(&kmem_trace_free)) { | ||
518 | pr_warning("Warning: could not register kmem events\n"); | ||
519 | return 1; | ||
520 | } | ||
521 | |||
522 | if (register_tracer(&kmem_tracer) != 0) { | ||
523 | pr_warning("Warning: could not register the kmem tracer\n"); | ||
524 | return 1; | ||
525 | } | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | device_initcall(init_kmem_tracer); | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 1da7b6ea8b85..3632ce87674f 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -443,6 +443,7 @@ int ring_buffer_print_page_header(struct trace_seq *s) | |||
443 | */ | 443 | */ |
444 | struct ring_buffer_per_cpu { | 444 | struct ring_buffer_per_cpu { |
445 | int cpu; | 445 | int cpu; |
446 | atomic_t record_disabled; | ||
446 | struct ring_buffer *buffer; | 447 | struct ring_buffer *buffer; |
447 | spinlock_t reader_lock; /* serialize readers */ | 448 | spinlock_t reader_lock; /* serialize readers */ |
448 | arch_spinlock_t lock; | 449 | arch_spinlock_t lock; |
@@ -462,7 +463,6 @@ struct ring_buffer_per_cpu { | |||
462 | unsigned long read; | 463 | unsigned long read; |
463 | u64 write_stamp; | 464 | u64 write_stamp; |
464 | u64 read_stamp; | 465 | u64 read_stamp; |
465 | atomic_t record_disabled; | ||
466 | }; | 466 | }; |
467 | 467 | ||
468 | struct ring_buffer { | 468 | struct ring_buffer { |
@@ -2242,8 +2242,6 @@ static void trace_recursive_unlock(void) | |||
2242 | 2242 | ||
2243 | #endif | 2243 | #endif |
2244 | 2244 | ||
2245 | static DEFINE_PER_CPU(int, rb_need_resched); | ||
2246 | |||
2247 | /** | 2245 | /** |
2248 | * ring_buffer_lock_reserve - reserve a part of the buffer | 2246 | * ring_buffer_lock_reserve - reserve a part of the buffer |
2249 | * @buffer: the ring buffer to reserve from | 2247 | * @buffer: the ring buffer to reserve from |
@@ -2264,13 +2262,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) | |||
2264 | { | 2262 | { |
2265 | struct ring_buffer_per_cpu *cpu_buffer; | 2263 | struct ring_buffer_per_cpu *cpu_buffer; |
2266 | struct ring_buffer_event *event; | 2264 | struct ring_buffer_event *event; |
2267 | int cpu, resched; | 2265 | int cpu; |
2268 | 2266 | ||
2269 | if (ring_buffer_flags != RB_BUFFERS_ON) | 2267 | if (ring_buffer_flags != RB_BUFFERS_ON) |
2270 | return NULL; | 2268 | return NULL; |
2271 | 2269 | ||
2272 | /* If we are tracing schedule, we don't want to recurse */ | 2270 | /* If we are tracing schedule, we don't want to recurse */ |
2273 | resched = ftrace_preempt_disable(); | 2271 | preempt_disable_notrace(); |
2274 | 2272 | ||
2275 | if (atomic_read(&buffer->record_disabled)) | 2273 | if (atomic_read(&buffer->record_disabled)) |
2276 | goto out_nocheck; | 2274 | goto out_nocheck; |
@@ -2295,21 +2293,13 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) | |||
2295 | if (!event) | 2293 | if (!event) |
2296 | goto out; | 2294 | goto out; |
2297 | 2295 | ||
2298 | /* | ||
2299 | * Need to store resched state on this cpu. | ||
2300 | * Only the first needs to. | ||
2301 | */ | ||
2302 | |||
2303 | if (preempt_count() == 1) | ||
2304 | per_cpu(rb_need_resched, cpu) = resched; | ||
2305 | |||
2306 | return event; | 2296 | return event; |
2307 | 2297 | ||
2308 | out: | 2298 | out: |
2309 | trace_recursive_unlock(); | 2299 | trace_recursive_unlock(); |
2310 | 2300 | ||
2311 | out_nocheck: | 2301 | out_nocheck: |
2312 | ftrace_preempt_enable(resched); | 2302 | preempt_enable_notrace(); |
2313 | return NULL; | 2303 | return NULL; |
2314 | } | 2304 | } |
2315 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); | 2305 | EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve); |
@@ -2355,13 +2345,7 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer, | |||
2355 | 2345 | ||
2356 | trace_recursive_unlock(); | 2346 | trace_recursive_unlock(); |
2357 | 2347 | ||
2358 | /* | 2348 | preempt_enable_notrace(); |
2359 | * Only the last preempt count needs to restore preemption. | ||
2360 | */ | ||
2361 | if (preempt_count() == 1) | ||
2362 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); | ||
2363 | else | ||
2364 | preempt_enable_no_resched_notrace(); | ||
2365 | 2349 | ||
2366 | return 0; | 2350 | return 0; |
2367 | } | 2351 | } |
@@ -2469,13 +2453,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer, | |||
2469 | 2453 | ||
2470 | trace_recursive_unlock(); | 2454 | trace_recursive_unlock(); |
2471 | 2455 | ||
2472 | /* | 2456 | preempt_enable_notrace(); |
2473 | * Only the last preempt count needs to restore preemption. | ||
2474 | */ | ||
2475 | if (preempt_count() == 1) | ||
2476 | ftrace_preempt_enable(per_cpu(rb_need_resched, cpu)); | ||
2477 | else | ||
2478 | preempt_enable_no_resched_notrace(); | ||
2479 | 2457 | ||
2480 | } | 2458 | } |
2481 | EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); | 2459 | EXPORT_SYMBOL_GPL(ring_buffer_discard_commit); |
@@ -2501,12 +2479,12 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
2501 | struct ring_buffer_event *event; | 2479 | struct ring_buffer_event *event; |
2502 | void *body; | 2480 | void *body; |
2503 | int ret = -EBUSY; | 2481 | int ret = -EBUSY; |
2504 | int cpu, resched; | 2482 | int cpu; |
2505 | 2483 | ||
2506 | if (ring_buffer_flags != RB_BUFFERS_ON) | 2484 | if (ring_buffer_flags != RB_BUFFERS_ON) |
2507 | return -EBUSY; | 2485 | return -EBUSY; |
2508 | 2486 | ||
2509 | resched = ftrace_preempt_disable(); | 2487 | preempt_disable_notrace(); |
2510 | 2488 | ||
2511 | if (atomic_read(&buffer->record_disabled)) | 2489 | if (atomic_read(&buffer->record_disabled)) |
2512 | goto out; | 2490 | goto out; |
@@ -2536,7 +2514,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
2536 | 2514 | ||
2537 | ret = 0; | 2515 | ret = 0; |
2538 | out: | 2516 | out: |
2539 | ftrace_preempt_enable(resched); | 2517 | preempt_enable_notrace(); |
2540 | 2518 | ||
2541 | return ret; | 2519 | return ret; |
2542 | } | 2520 | } |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index d6736b93dc2a..ed1032d6f81d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -341,7 +341,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | |||
341 | /* trace_flags holds trace_options default values */ | 341 | /* trace_flags holds trace_options default values */ |
342 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 342 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
343 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | | 343 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | |
344 | TRACE_ITER_GRAPH_TIME; | 344 | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD; |
345 | 345 | ||
346 | static int trace_stop_count; | 346 | static int trace_stop_count; |
347 | static DEFINE_SPINLOCK(tracing_start_lock); | 347 | static DEFINE_SPINLOCK(tracing_start_lock); |
@@ -425,6 +425,7 @@ static const char *trace_options[] = { | |||
425 | "latency-format", | 425 | "latency-format", |
426 | "sleep-time", | 426 | "sleep-time", |
427 | "graph-time", | 427 | "graph-time", |
428 | "record-cmd", | ||
428 | NULL | 429 | NULL |
429 | }; | 430 | }; |
430 | 431 | ||
@@ -656,6 +657,10 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
656 | return; | 657 | return; |
657 | 658 | ||
658 | WARN_ON_ONCE(!irqs_disabled()); | 659 | WARN_ON_ONCE(!irqs_disabled()); |
660 | if (!current_trace->use_max_tr) { | ||
661 | WARN_ON_ONCE(1); | ||
662 | return; | ||
663 | } | ||
659 | arch_spin_lock(&ftrace_max_lock); | 664 | arch_spin_lock(&ftrace_max_lock); |
660 | 665 | ||
661 | tr->buffer = max_tr.buffer; | 666 | tr->buffer = max_tr.buffer; |
@@ -682,6 +687,11 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
682 | return; | 687 | return; |
683 | 688 | ||
684 | WARN_ON_ONCE(!irqs_disabled()); | 689 | WARN_ON_ONCE(!irqs_disabled()); |
690 | if (!current_trace->use_max_tr) { | ||
691 | WARN_ON_ONCE(1); | ||
692 | return; | ||
693 | } | ||
694 | |||
685 | arch_spin_lock(&ftrace_max_lock); | 695 | arch_spin_lock(&ftrace_max_lock); |
686 | 696 | ||
687 | ftrace_disable_cpu(); | 697 | ftrace_disable_cpu(); |
@@ -726,7 +736,7 @@ __acquires(kernel_lock) | |||
726 | return -1; | 736 | return -1; |
727 | } | 737 | } |
728 | 738 | ||
729 | if (strlen(type->name) > MAX_TRACER_SIZE) { | 739 | if (strlen(type->name) >= MAX_TRACER_SIZE) { |
730 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); | 740 | pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE); |
731 | return -1; | 741 | return -1; |
732 | } | 742 | } |
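The comparison changes from > to >= because a tracer name of exactly MAX_TRACER_SIZE characters would leave no room for the terminating NUL once the name is copied into a MAX_TRACER_SIZE-byte buffer. A stand-alone illustration of the bound, using 100 purely as a stand-in value for MAX_TRACER_SIZE and a hypothetical destination buffer:

#include <stdio.h>
#include <string.h>

#define MAX_TRACER_SIZE 100			/* stand-in value */

static char current_trace_name[MAX_TRACER_SIZE];	/* hypothetical buffer */

static int set_name(const char *name)
{
	/* strlen() must be strictly below the buffer size so the '\0' fits. */
	if (strlen(name) >= MAX_TRACER_SIZE)
		return -1;

	strcpy(current_trace_name, name);
	return 0;
}

int main(void)
{
	char too_long[MAX_TRACER_SIZE + 1];

	memset(too_long, 'x', MAX_TRACER_SIZE);
	too_long[MAX_TRACER_SIZE] = '\0';	/* exactly 100 chars: rejected */

	printf("%d %d\n", set_name("nop"), set_name(too_long));	/* 0 -1 */
	return 0;
}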
@@ -1328,61 +1338,6 @@ static void __trace_userstack(struct trace_array *tr, unsigned long flags) | |||
1328 | 1338 | ||
1329 | #endif /* CONFIG_STACKTRACE */ | 1339 | #endif /* CONFIG_STACKTRACE */ |
1330 | 1340 | ||
1331 | static void | ||
1332 | ftrace_trace_special(void *__tr, | ||
1333 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | ||
1334 | int pc) | ||
1335 | { | ||
1336 | struct ftrace_event_call *call = &event_special; | ||
1337 | struct ring_buffer_event *event; | ||
1338 | struct trace_array *tr = __tr; | ||
1339 | struct ring_buffer *buffer = tr->buffer; | ||
1340 | struct special_entry *entry; | ||
1341 | |||
1342 | event = trace_buffer_lock_reserve(buffer, TRACE_SPECIAL, | ||
1343 | sizeof(*entry), 0, pc); | ||
1344 | if (!event) | ||
1345 | return; | ||
1346 | entry = ring_buffer_event_data(event); | ||
1347 | entry->arg1 = arg1; | ||
1348 | entry->arg2 = arg2; | ||
1349 | entry->arg3 = arg3; | ||
1350 | |||
1351 | if (!filter_check_discard(call, entry, buffer, event)) | ||
1352 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
1353 | } | ||
1354 | |||
1355 | void | ||
1356 | __trace_special(void *__tr, void *__data, | ||
1357 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
1358 | { | ||
1359 | ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count()); | ||
1360 | } | ||
1361 | |||
1362 | void | ||
1363 | ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | ||
1364 | { | ||
1365 | struct trace_array *tr = &global_trace; | ||
1366 | struct trace_array_cpu *data; | ||
1367 | unsigned long flags; | ||
1368 | int cpu; | ||
1369 | int pc; | ||
1370 | |||
1371 | if (tracing_disabled) | ||
1372 | return; | ||
1373 | |||
1374 | pc = preempt_count(); | ||
1375 | local_irq_save(flags); | ||
1376 | cpu = raw_smp_processor_id(); | ||
1377 | data = tr->data[cpu]; | ||
1378 | |||
1379 | if (likely(atomic_inc_return(&data->disabled) == 1)) | ||
1380 | ftrace_trace_special(tr, arg1, arg2, arg3, pc); | ||
1381 | |||
1382 | atomic_dec(&data->disabled); | ||
1383 | local_irq_restore(flags); | ||
1384 | } | ||
1385 | |||
1386 | /** | 1341 | /** |
1387 | * trace_vbprintk - write binary msg to tracing buffer | 1342 | * trace_vbprintk - write binary msg to tracing buffer |
1388 | * | 1343 | * |
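With the special-entry plumbing removed, the surviving way to drop a few ad-hoc scratch values into the trace buffer is trace_printk(), whose backend (trace_vbprintk()) is touched just below. A minimal sketch, assuming ordinary kernel context with tracing compiled in:

#include <linux/kernel.h>

static void demo_mark_progress(unsigned long a1, unsigned long a2,
			       unsigned long a3)
{
	/*
	 * trace_printk() writes straight into the ftrace ring buffer and
	 * covers what the fixed three-argument ftrace_special() entry used
	 * to carry, but with a free-form format string.
	 */
	trace_printk("state: %lu %lu %lu\n", a1, a2, a3);
}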
@@ -1401,7 +1356,6 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1401 | struct bprint_entry *entry; | 1356 | struct bprint_entry *entry; |
1402 | unsigned long flags; | 1357 | unsigned long flags; |
1403 | int disable; | 1358 | int disable; |
1404 | int resched; | ||
1405 | int cpu, len = 0, size, pc; | 1359 | int cpu, len = 0, size, pc; |
1406 | 1360 | ||
1407 | if (unlikely(tracing_selftest_running || tracing_disabled)) | 1361 | if (unlikely(tracing_selftest_running || tracing_disabled)) |
@@ -1411,7 +1365,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args) | |||
1411 | pause_graph_tracing(); | 1365 | pause_graph_tracing(); |
1412 | 1366 | ||
1413 | pc = preempt_count(); | 1367 | pc = preempt_count(); |
1414 | resched = ftrace_preempt_disable(); | 1368 | preempt_disable_notrace(); |
1415 | cpu = raw_smp_processor_id(); | 1369 | cpu = raw_smp_processor_id(); |
1416 | data = tr->data[cpu]; | 1370 | data = tr->data[cpu]; |
1417 | 1371 | ||
@@ -1449,7 +1403,7 @@ out_unlock: | |||
1449 | 1403 | ||
1450 | out: | 1404 | out: |
1451 | atomic_dec_return(&data->disabled); | 1405 | atomic_dec_return(&data->disabled); |
1452 | ftrace_preempt_enable(resched); | 1406 | preempt_enable_notrace(); |
1453 | unpause_graph_tracing(); | 1407 | unpause_graph_tracing(); |
1454 | 1408 | ||
1455 | return len; | 1409 | return len; |
@@ -2386,6 +2340,7 @@ static const struct file_operations show_traces_fops = { | |||
2386 | .open = show_traces_open, | 2340 | .open = show_traces_open, |
2387 | .read = seq_read, | 2341 | .read = seq_read, |
2388 | .release = seq_release, | 2342 | .release = seq_release, |
2343 | .llseek = seq_lseek, | ||
2389 | }; | 2344 | }; |
2390 | 2345 | ||
2391 | /* | 2346 | /* |
@@ -2479,6 +2434,7 @@ static const struct file_operations tracing_cpumask_fops = { | |||
2479 | .open = tracing_open_generic, | 2434 | .open = tracing_open_generic, |
2480 | .read = tracing_cpumask_read, | 2435 | .read = tracing_cpumask_read, |
2481 | .write = tracing_cpumask_write, | 2436 | .write = tracing_cpumask_write, |
2437 | .llseek = generic_file_llseek, | ||
2482 | }; | 2438 | }; |
2483 | 2439 | ||
2484 | static int tracing_trace_options_show(struct seq_file *m, void *v) | 2440 | static int tracing_trace_options_show(struct seq_file *m, void *v) |
@@ -2554,6 +2510,9 @@ static void set_tracer_flags(unsigned int mask, int enabled) | |||
2554 | trace_flags |= mask; | 2510 | trace_flags |= mask; |
2555 | else | 2511 | else |
2556 | trace_flags &= ~mask; | 2512 | trace_flags &= ~mask; |
2513 | |||
2514 | if (mask == TRACE_ITER_RECORD_CMD) | ||
2515 | trace_event_enable_cmd_record(enabled); | ||
2557 | } | 2516 | } |
2558 | 2517 | ||
2559 | static ssize_t | 2518 | static ssize_t |
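This hunk is one half of the record-cmd wiring; the other half, trace_event_enable_cmd_record(), is added in kernel/trace/trace_events.c further down. Read together, flipping the option walks every currently enabled event and starts or stops PID-to-comm recording for it. A condensed sketch of the combined effect (the loop body mirrors the trace_events.c hunk; the helper name here is made up):

#include "trace.h"

/*
 * Hypothetical consolidation of set_tracer_flags() plus
 * trace_event_enable_cmd_record() for the record-cmd option.
 */
static void demo_set_record_cmd(int enabled)
{
	struct ftrace_event_call *call;

	mutex_lock(&event_mutex);
	list_for_each_entry(call, &ftrace_events, list) {
		if (!(call->flags & TRACE_EVENT_FL_ENABLED))
			continue;

		if (enabled) {
			tracing_start_cmdline_record();
			call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
		} else {
			tracing_stop_cmdline_record();
			call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
		}
	}
	mutex_unlock(&event_mutex);
}

In practice the toggle comes from writing 0 or 1 to the record-cmd entry under the tracing options directory.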
@@ -2645,6 +2604,7 @@ tracing_readme_read(struct file *filp, char __user *ubuf, | |||
2645 | static const struct file_operations tracing_readme_fops = { | 2604 | static const struct file_operations tracing_readme_fops = { |
2646 | .open = tracing_open_generic, | 2605 | .open = tracing_open_generic, |
2647 | .read = tracing_readme_read, | 2606 | .read = tracing_readme_read, |
2607 | .llseek = generic_file_llseek, | ||
2648 | }; | 2608 | }; |
2649 | 2609 | ||
2650 | static ssize_t | 2610 | static ssize_t |
@@ -2695,6 +2655,7 @@ tracing_saved_cmdlines_read(struct file *file, char __user *ubuf, | |||
2695 | static const struct file_operations tracing_saved_cmdlines_fops = { | 2655 | static const struct file_operations tracing_saved_cmdlines_fops = { |
2696 | .open = tracing_open_generic, | 2656 | .open = tracing_open_generic, |
2697 | .read = tracing_saved_cmdlines_read, | 2657 | .read = tracing_saved_cmdlines_read, |
2658 | .llseek = generic_file_llseek, | ||
2698 | }; | 2659 | }; |
2699 | 2660 | ||
2700 | static ssize_t | 2661 | static ssize_t |
@@ -2790,6 +2751,9 @@ static int tracing_resize_ring_buffer(unsigned long size) | |||
2790 | if (ret < 0) | 2751 | if (ret < 0) |
2791 | return ret; | 2752 | return ret; |
2792 | 2753 | ||
2754 | if (!current_trace->use_max_tr) | ||
2755 | goto out; | ||
2756 | |||
2793 | ret = ring_buffer_resize(max_tr.buffer, size); | 2757 | ret = ring_buffer_resize(max_tr.buffer, size); |
2794 | if (ret < 0) { | 2758 | if (ret < 0) { |
2795 | int r; | 2759 | int r; |
@@ -2817,11 +2781,14 @@ static int tracing_resize_ring_buffer(unsigned long size) | |||
2817 | return ret; | 2781 | return ret; |
2818 | } | 2782 | } |
2819 | 2783 | ||
2784 | max_tr.entries = size; | ||
2785 | out: | ||
2820 | global_trace.entries = size; | 2786 | global_trace.entries = size; |
2821 | 2787 | ||
2822 | return ret; | 2788 | return ret; |
2823 | } | 2789 | } |
2824 | 2790 | ||
2791 | |||
2825 | /** | 2792 | /** |
2826 | * tracing_update_buffers - used by tracing facility to expand ring buffers | 2793 | * tracing_update_buffers - used by tracing facility to expand ring buffers |
2827 | * | 2794 | * |
@@ -2882,12 +2849,26 @@ static int tracing_set_tracer(const char *buf) | |||
2882 | trace_branch_disable(); | 2849 | trace_branch_disable(); |
2883 | if (current_trace && current_trace->reset) | 2850 | if (current_trace && current_trace->reset) |
2884 | current_trace->reset(tr); | 2851 | current_trace->reset(tr); |
2885 | 2852 | if (current_trace && current_trace->use_max_tr) { | |
2853 | /* | ||
2854 | * We don't free the ring buffer. Instead, resize it because | ||
2855 | * the max_tr ring buffer has some state (e.g. ring->clock) and | ||
2856 | * we want to preserve it. | ||
2857 | */ | ||
2858 | ring_buffer_resize(max_tr.buffer, 1); | ||
2859 | max_tr.entries = 1; | ||
2860 | } | ||
2886 | destroy_trace_option_files(topts); | 2861 | destroy_trace_option_files(topts); |
2887 | 2862 | ||
2888 | current_trace = t; | 2863 | current_trace = t; |
2889 | 2864 | ||
2890 | topts = create_trace_option_files(current_trace); | 2865 | topts = create_trace_option_files(current_trace); |
2866 | if (current_trace->use_max_tr) { | ||
2867 | ret = ring_buffer_resize(max_tr.buffer, global_trace.entries); | ||
2868 | if (ret < 0) | ||
2869 | goto out; | ||
2870 | max_tr.entries = global_trace.entries; | ||
2871 | } | ||
2891 | 2872 | ||
2892 | if (t->init) { | 2873 | if (t->init) { |
2893 | ret = tracer_init(t, tr); | 2874 | ret = tracer_init(t, tr); |
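Taken together with the shrink on the way in, the policy is: keep max_tr allocated but reduced to a single entry whenever the selected tracer does not set use_max_tr, and grow it back to match the live buffer only when a max-latency style tracer is chosen. The same policy pulled into one place for readability, written as if it sat next to tracing_set_tracer() in this file (resize_max_buffer() itself is hypothetical and not part of the patch):

/* Hypothetical consolidation of the use_max_tr sizing above. */
static int resize_max_buffer(struct tracer *t)
{
	int ret;

	if (!t->use_max_tr) {
		/*
		 * Keep the buffer around (it carries state such as its
		 * clock setting) but shrink it to one entry.
		 */
		ring_buffer_resize(max_tr.buffer, 1);
		max_tr.entries = 1;
		return 0;
	}

	/* A latency tracer was picked: mirror the live buffer's size. */
	ret = ring_buffer_resize(max_tr.buffer, global_trace.entries);
	if (ret < 0)
		return ret;

	max_tr.entries = global_trace.entries;
	return 0;
}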
@@ -3024,6 +3005,7 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
3024 | if (iter->trace->pipe_open) | 3005 | if (iter->trace->pipe_open) |
3025 | iter->trace->pipe_open(iter); | 3006 | iter->trace->pipe_open(iter); |
3026 | 3007 | ||
3008 | nonseekable_open(inode, filp); | ||
3027 | out: | 3009 | out: |
3028 | mutex_unlock(&trace_types_lock); | 3010 | mutex_unlock(&trace_types_lock); |
3029 | return ret; | 3011 | return ret; |
@@ -3469,7 +3451,6 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3469 | } | 3451 | } |
3470 | 3452 | ||
3471 | tracing_start(); | 3453 | tracing_start(); |
3472 | max_tr.entries = global_trace.entries; | ||
3473 | mutex_unlock(&trace_types_lock); | 3454 | mutex_unlock(&trace_types_lock); |
3474 | 3455 | ||
3475 | return cnt; | 3456 | return cnt; |
@@ -3582,18 +3563,21 @@ static const struct file_operations tracing_max_lat_fops = { | |||
3582 | .open = tracing_open_generic, | 3563 | .open = tracing_open_generic, |
3583 | .read = tracing_max_lat_read, | 3564 | .read = tracing_max_lat_read, |
3584 | .write = tracing_max_lat_write, | 3565 | .write = tracing_max_lat_write, |
3566 | .llseek = generic_file_llseek, | ||
3585 | }; | 3567 | }; |
3586 | 3568 | ||
3587 | static const struct file_operations tracing_ctrl_fops = { | 3569 | static const struct file_operations tracing_ctrl_fops = { |
3588 | .open = tracing_open_generic, | 3570 | .open = tracing_open_generic, |
3589 | .read = tracing_ctrl_read, | 3571 | .read = tracing_ctrl_read, |
3590 | .write = tracing_ctrl_write, | 3572 | .write = tracing_ctrl_write, |
3573 | .llseek = generic_file_llseek, | ||
3591 | }; | 3574 | }; |
3592 | 3575 | ||
3593 | static const struct file_operations set_tracer_fops = { | 3576 | static const struct file_operations set_tracer_fops = { |
3594 | .open = tracing_open_generic, | 3577 | .open = tracing_open_generic, |
3595 | .read = tracing_set_trace_read, | 3578 | .read = tracing_set_trace_read, |
3596 | .write = tracing_set_trace_write, | 3579 | .write = tracing_set_trace_write, |
3580 | .llseek = generic_file_llseek, | ||
3597 | }; | 3581 | }; |
3598 | 3582 | ||
3599 | static const struct file_operations tracing_pipe_fops = { | 3583 | static const struct file_operations tracing_pipe_fops = { |
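The long run of .llseek additions here and below appears to be part of the tree-wide effort to make every file_operations state its seek behaviour explicitly: plain position-based readers get generic_file_llseek, while stream-like files refuse seeking with no_llseek (or mark themselves non-seekable at open time, as tracing_open_pipe() now does with nonseekable_open()). A small sketch of the two flavours for a hypothetical pair of debugfs-style files:

#include <linux/fs.h>

static ssize_t demo_read(struct file *filp, char __user *ubuf,
			 size_t cnt, loff_t *ppos)
{
	static const char msg[] = "demo\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, msg, sizeof(msg) - 1);
}

/* Fixed-content file: offsets are meaningful, so seeking is allowed. */
static const struct file_operations demo_info_fops = {
	.read	= demo_read,
	.llseek	= generic_file_llseek,
};

/* Stream-style file: there is no stable position, so refuse lseek(). */
static const struct file_operations demo_stream_fops = {
	.read	= demo_read,
	.llseek	= no_llseek,
};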
@@ -3602,17 +3586,20 @@ static const struct file_operations tracing_pipe_fops = { | |||
3602 | .read = tracing_read_pipe, | 3586 | .read = tracing_read_pipe, |
3603 | .splice_read = tracing_splice_read_pipe, | 3587 | .splice_read = tracing_splice_read_pipe, |
3604 | .release = tracing_release_pipe, | 3588 | .release = tracing_release_pipe, |
3589 | .llseek = no_llseek, | ||
3605 | }; | 3590 | }; |
3606 | 3591 | ||
3607 | static const struct file_operations tracing_entries_fops = { | 3592 | static const struct file_operations tracing_entries_fops = { |
3608 | .open = tracing_open_generic, | 3593 | .open = tracing_open_generic, |
3609 | .read = tracing_entries_read, | 3594 | .read = tracing_entries_read, |
3610 | .write = tracing_entries_write, | 3595 | .write = tracing_entries_write, |
3596 | .llseek = generic_file_llseek, | ||
3611 | }; | 3597 | }; |
3612 | 3598 | ||
3613 | static const struct file_operations tracing_mark_fops = { | 3599 | static const struct file_operations tracing_mark_fops = { |
3614 | .open = tracing_open_generic, | 3600 | .open = tracing_open_generic, |
3615 | .write = tracing_mark_write, | 3601 | .write = tracing_mark_write, |
3602 | .llseek = generic_file_llseek, | ||
3616 | }; | 3603 | }; |
3617 | 3604 | ||
3618 | static const struct file_operations trace_clock_fops = { | 3605 | static const struct file_operations trace_clock_fops = { |
@@ -3918,6 +3905,7 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
3918 | static const struct file_operations tracing_stats_fops = { | 3905 | static const struct file_operations tracing_stats_fops = { |
3919 | .open = tracing_open_generic, | 3906 | .open = tracing_open_generic, |
3920 | .read = tracing_stats_read, | 3907 | .read = tracing_stats_read, |
3908 | .llseek = generic_file_llseek, | ||
3921 | }; | 3909 | }; |
3922 | 3910 | ||
3923 | #ifdef CONFIG_DYNAMIC_FTRACE | 3911 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -3954,6 +3942,7 @@ tracing_read_dyn_info(struct file *filp, char __user *ubuf, | |||
3954 | static const struct file_operations tracing_dyn_info_fops = { | 3942 | static const struct file_operations tracing_dyn_info_fops = { |
3955 | .open = tracing_open_generic, | 3943 | .open = tracing_open_generic, |
3956 | .read = tracing_read_dyn_info, | 3944 | .read = tracing_read_dyn_info, |
3945 | .llseek = generic_file_llseek, | ||
3957 | }; | 3946 | }; |
3958 | #endif | 3947 | #endif |
3959 | 3948 | ||
@@ -4107,6 +4096,7 @@ static const struct file_operations trace_options_fops = { | |||
4107 | .open = tracing_open_generic, | 4096 | .open = tracing_open_generic, |
4108 | .read = trace_options_read, | 4097 | .read = trace_options_read, |
4109 | .write = trace_options_write, | 4098 | .write = trace_options_write, |
4099 | .llseek = generic_file_llseek, | ||
4110 | }; | 4100 | }; |
4111 | 4101 | ||
4112 | static ssize_t | 4102 | static ssize_t |
@@ -4158,6 +4148,7 @@ static const struct file_operations trace_options_core_fops = { | |||
4158 | .open = tracing_open_generic, | 4148 | .open = tracing_open_generic, |
4159 | .read = trace_options_core_read, | 4149 | .read = trace_options_core_read, |
4160 | .write = trace_options_core_write, | 4150 | .write = trace_options_core_write, |
4151 | .llseek = generic_file_llseek, | ||
4161 | }; | 4152 | }; |
4162 | 4153 | ||
4163 | struct dentry *trace_create_file(const char *name, | 4154 | struct dentry *trace_create_file(const char *name, |
@@ -4347,9 +4338,6 @@ static __init int tracer_init_debugfs(void) | |||
4347 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 4338 | trace_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
4348 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); | 4339 | &ftrace_update_tot_cnt, &tracing_dyn_info_fops); |
4349 | #endif | 4340 | #endif |
4350 | #ifdef CONFIG_SYSPROF_TRACER | ||
4351 | init_tracer_sysprof_debugfs(d_tracer); | ||
4352 | #endif | ||
4353 | 4341 | ||
4354 | create_trace_options_dir(); | 4342 | create_trace_options_dir(); |
4355 | 4343 | ||
@@ -4576,16 +4564,14 @@ __init static int tracer_alloc_buffers(void) | |||
4576 | 4564 | ||
4577 | 4565 | ||
4578 | #ifdef CONFIG_TRACER_MAX_TRACE | 4566 | #ifdef CONFIG_TRACER_MAX_TRACE |
4579 | max_tr.buffer = ring_buffer_alloc(ring_buf_size, | 4567 | max_tr.buffer = ring_buffer_alloc(1, TRACE_BUFFER_FLAGS); |
4580 | TRACE_BUFFER_FLAGS); | ||
4581 | if (!max_tr.buffer) { | 4568 | if (!max_tr.buffer) { |
4582 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); | 4569 | printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n"); |
4583 | WARN_ON(1); | 4570 | WARN_ON(1); |
4584 | ring_buffer_free(global_trace.buffer); | 4571 | ring_buffer_free(global_trace.buffer); |
4585 | goto out_free_cpumask; | 4572 | goto out_free_cpumask; |
4586 | } | 4573 | } |
4587 | max_tr.entries = ring_buffer_size(max_tr.buffer); | 4574 | max_tr.entries = 1; |
4588 | WARN_ON(max_tr.entries != global_trace.entries); | ||
4589 | #endif | 4575 | #endif |
4590 | 4576 | ||
4591 | /* Allocate the first page for all buffers */ | 4577 | /* Allocate the first page for all buffers */ |
@@ -4598,9 +4584,6 @@ __init static int tracer_alloc_buffers(void) | |||
4598 | 4584 | ||
4599 | register_tracer(&nop_trace); | 4585 | register_tracer(&nop_trace); |
4600 | current_trace = &nop_trace; | 4586 | current_trace = &nop_trace; |
4601 | #ifdef CONFIG_BOOT_TRACER | ||
4602 | register_tracer(&boot_tracer); | ||
4603 | #endif | ||
4604 | /* All seems OK, enable tracing */ | 4587 | /* All seems OK, enable tracing */ |
4605 | tracing_disabled = 0; | 4588 | tracing_disabled = 0; |
4606 | 4589 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 0605fc00c176..d39b3c5454a5 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -9,10 +9,7 @@ | |||
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/tracepoint.h> | 10 | #include <linux/tracepoint.h> |
11 | #include <linux/ftrace.h> | 11 | #include <linux/ftrace.h> |
12 | #include <trace/boot.h> | ||
13 | #include <linux/kmemtrace.h> | ||
14 | #include <linux/hw_breakpoint.h> | 12 | #include <linux/hw_breakpoint.h> |
15 | |||
16 | #include <linux/trace_seq.h> | 13 | #include <linux/trace_seq.h> |
17 | #include <linux/ftrace_event.h> | 14 | #include <linux/ftrace_event.h> |
18 | 15 | ||
@@ -25,30 +22,17 @@ enum trace_type { | |||
25 | TRACE_STACK, | 22 | TRACE_STACK, |
26 | TRACE_PRINT, | 23 | TRACE_PRINT, |
27 | TRACE_BPRINT, | 24 | TRACE_BPRINT, |
28 | TRACE_SPECIAL, | ||
29 | TRACE_MMIO_RW, | 25 | TRACE_MMIO_RW, |
30 | TRACE_MMIO_MAP, | 26 | TRACE_MMIO_MAP, |
31 | TRACE_BRANCH, | 27 | TRACE_BRANCH, |
32 | TRACE_BOOT_CALL, | ||
33 | TRACE_BOOT_RET, | ||
34 | TRACE_GRAPH_RET, | 28 | TRACE_GRAPH_RET, |
35 | TRACE_GRAPH_ENT, | 29 | TRACE_GRAPH_ENT, |
36 | TRACE_USER_STACK, | 30 | TRACE_USER_STACK, |
37 | TRACE_KMEM_ALLOC, | ||
38 | TRACE_KMEM_FREE, | ||
39 | TRACE_BLK, | 31 | TRACE_BLK, |
40 | TRACE_KSYM, | ||
41 | 32 | ||
42 | __TRACE_LAST_TYPE, | 33 | __TRACE_LAST_TYPE, |
43 | }; | 34 | }; |
44 | 35 | ||
45 | enum kmemtrace_type_id { | ||
46 | KMEMTRACE_TYPE_KMALLOC = 0, /* kmalloc() or kfree(). */ | ||
47 | KMEMTRACE_TYPE_CACHE, /* kmem_cache_*(). */ | ||
48 | KMEMTRACE_TYPE_PAGES, /* __get_free_pages() and friends. */ | ||
49 | }; | ||
50 | |||
51 | extern struct tracer boot_tracer; | ||
52 | 36 | ||
53 | #undef __field | 37 | #undef __field |
54 | #define __field(type, item) type item; | 38 | #define __field(type, item) type item; |
@@ -204,23 +188,15 @@ extern void __ftrace_bad_type(void); | |||
204 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ | 188 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
205 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ | 189 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
206 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ | 190 | IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ |
207 | IF_ASSIGN(var, ent, struct special_entry, 0); \ | ||
208 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ | 191 | IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ |
209 | TRACE_MMIO_RW); \ | 192 | TRACE_MMIO_RW); \ |
210 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ | 193 | IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ |
211 | TRACE_MMIO_MAP); \ | 194 | TRACE_MMIO_MAP); \ |
212 | IF_ASSIGN(var, ent, struct trace_boot_call, TRACE_BOOT_CALL);\ | ||
213 | IF_ASSIGN(var, ent, struct trace_boot_ret, TRACE_BOOT_RET);\ | ||
214 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ | 195 | IF_ASSIGN(var, ent, struct trace_branch, TRACE_BRANCH); \ |
215 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ | 196 | IF_ASSIGN(var, ent, struct ftrace_graph_ent_entry, \ |
216 | TRACE_GRAPH_ENT); \ | 197 | TRACE_GRAPH_ENT); \ |
217 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ | 198 | IF_ASSIGN(var, ent, struct ftrace_graph_ret_entry, \ |
218 | TRACE_GRAPH_RET); \ | 199 | TRACE_GRAPH_RET); \ |
219 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ | ||
220 | TRACE_KMEM_ALLOC); \ | ||
221 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | ||
222 | TRACE_KMEM_FREE); \ | ||
223 | IF_ASSIGN(var, ent, struct ksym_trace_entry, TRACE_KSYM);\ | ||
224 | __ftrace_bad_type(); \ | 200 | __ftrace_bad_type(); \ |
225 | } while (0) | 201 | } while (0) |
226 | 202 | ||
@@ -298,6 +274,7 @@ struct tracer { | |||
298 | struct tracer *next; | 274 | struct tracer *next; |
299 | int print_max; | 275 | int print_max; |
300 | struct tracer_flags *flags; | 276 | struct tracer_flags *flags; |
277 | int use_max_tr; | ||
301 | }; | 278 | }; |
302 | 279 | ||
303 | 280 | ||
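use_max_tr is the opt-in flag: only tracers that actually snapshot into max_tr (the latency-measuring ones) set it, which is what lets trace.c above keep the second buffer at a single entry the rest of the time. A sketch of a tracer declaration that opts in; the tracer itself is invented, but the fields follow struct tracer as shown here:

#include "trace.h"

static int demo_lat_init(struct trace_array *tr)
{
	/* arm the tracer: hook callbacks, reset buffers, ... */
	return 0;
}

static void demo_lat_reset(struct trace_array *tr)
{
	/* undo demo_lat_init() */
}

/* Invented latency-style tracer that snapshots into max_tr. */
static struct tracer demo_lat_tracer __read_mostly = {
	.name		= "demo_lat",
	.init		= demo_lat_init,
	.reset		= demo_lat_reset,
	.print_max	= 1,
	.use_max_tr	= 1,	/* keep max_tr sized like the live buffer */
};

Registering it with register_tracer(&demo_lat_tracer) is then enough for tracing_set_tracer() to resize max_tr whenever it is selected.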
@@ -318,7 +295,6 @@ struct dentry *trace_create_file(const char *name, | |||
318 | const struct file_operations *fops); | 295 | const struct file_operations *fops); |
319 | 296 | ||
320 | struct dentry *tracing_init_dentry(void); | 297 | struct dentry *tracing_init_dentry(void); |
321 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | ||
322 | 298 | ||
323 | struct ring_buffer_event; | 299 | struct ring_buffer_event; |
324 | 300 | ||
@@ -363,11 +339,6 @@ void tracing_sched_wakeup_trace(struct trace_array *tr, | |||
363 | struct task_struct *wakee, | 339 | struct task_struct *wakee, |
364 | struct task_struct *cur, | 340 | struct task_struct *cur, |
365 | unsigned long flags, int pc); | 341 | unsigned long flags, int pc); |
366 | void trace_special(struct trace_array *tr, | ||
367 | struct trace_array_cpu *data, | ||
368 | unsigned long arg1, | ||
369 | unsigned long arg2, | ||
370 | unsigned long arg3, int pc); | ||
371 | void trace_function(struct trace_array *tr, | 342 | void trace_function(struct trace_array *tr, |
372 | unsigned long ip, | 343 | unsigned long ip, |
373 | unsigned long parent_ip, | 344 | unsigned long parent_ip, |
@@ -398,8 +369,6 @@ extern cpumask_var_t __read_mostly tracing_buffer_mask; | |||
398 | #define for_each_tracing_cpu(cpu) \ | 369 | #define for_each_tracing_cpu(cpu) \ |
399 | for_each_cpu(cpu, tracing_buffer_mask) | 370 | for_each_cpu(cpu, tracing_buffer_mask) |
400 | 371 | ||
401 | extern int process_new_ksym_entry(char *ksymname, int op, unsigned long addr); | ||
402 | |||
403 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); | 372 | extern unsigned long nsecs_to_usecs(unsigned long nsecs); |
404 | 373 | ||
405 | extern unsigned long tracing_thresh; | 374 | extern unsigned long tracing_thresh; |
@@ -469,12 +438,8 @@ extern int trace_selftest_startup_nop(struct tracer *trace, | |||
469 | struct trace_array *tr); | 438 | struct trace_array *tr); |
470 | extern int trace_selftest_startup_sched_switch(struct tracer *trace, | 439 | extern int trace_selftest_startup_sched_switch(struct tracer *trace, |
471 | struct trace_array *tr); | 440 | struct trace_array *tr); |
472 | extern int trace_selftest_startup_sysprof(struct tracer *trace, | ||
473 | struct trace_array *tr); | ||
474 | extern int trace_selftest_startup_branch(struct tracer *trace, | 441 | extern int trace_selftest_startup_branch(struct tracer *trace, |
475 | struct trace_array *tr); | 442 | struct trace_array *tr); |
476 | extern int trace_selftest_startup_ksym(struct tracer *trace, | ||
477 | struct trace_array *tr); | ||
478 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 443 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
479 | 444 | ||
480 | extern void *head_page(struct trace_array_cpu *data); | 445 | extern void *head_page(struct trace_array_cpu *data); |
@@ -636,6 +601,7 @@ enum trace_iterator_flags { | |||
636 | TRACE_ITER_LATENCY_FMT = 0x20000, | 601 | TRACE_ITER_LATENCY_FMT = 0x20000, |
637 | TRACE_ITER_SLEEP_TIME = 0x40000, | 602 | TRACE_ITER_SLEEP_TIME = 0x40000, |
638 | TRACE_ITER_GRAPH_TIME = 0x80000, | 603 | TRACE_ITER_GRAPH_TIME = 0x80000, |
604 | TRACE_ITER_RECORD_CMD = 0x100000, | ||
639 | }; | 605 | }; |
640 | 606 | ||
641 | /* | 607 | /* |
@@ -647,54 +613,6 @@ enum trace_iterator_flags { | |||
647 | 613 | ||
648 | extern struct tracer nop_trace; | 614 | extern struct tracer nop_trace; |
649 | 615 | ||
650 | /** | ||
651 | * ftrace_preempt_disable - disable preemption scheduler safe | ||
652 | * | ||
653 | * When tracing can happen inside the scheduler, there exists | ||
654 | * cases that the tracing might happen before the need_resched | ||
655 | * flag is checked. If this happens and the tracer calls | ||
656 | * preempt_enable (after a disable), a schedule might take place | ||
657 | * causing an infinite recursion. | ||
658 | * | ||
659 | * To prevent this, we read the need_resched flag before | ||
660 | * disabling preemption. When we want to enable preemption we | ||
661 | * check the flag, if it is set, then we call preempt_enable_no_resched. | ||
662 | * Otherwise, we call preempt_enable. | ||
663 | * | ||
664 | * The rational for doing the above is that if need_resched is set | ||
665 | * and we have yet to reschedule, we are either in an atomic location | ||
666 | * (where we do not need to check for scheduling) or we are inside | ||
667 | * the scheduler and do not want to resched. | ||
668 | */ | ||
669 | static inline int ftrace_preempt_disable(void) | ||
670 | { | ||
671 | int resched; | ||
672 | |||
673 | resched = need_resched(); | ||
674 | preempt_disable_notrace(); | ||
675 | |||
676 | return resched; | ||
677 | } | ||
678 | |||
679 | /** | ||
680 | * ftrace_preempt_enable - enable preemption scheduler safe | ||
681 | * @resched: the return value from ftrace_preempt_disable | ||
682 | * | ||
683 | * This is a scheduler safe way to enable preemption and not miss | ||
684 | * any preemption checks. The disabled saved the state of preemption. | ||
685 | * If resched is set, then we are either inside an atomic or | ||
686 | * are inside the scheduler (we would have already scheduled | ||
687 | * otherwise). In this case, we do not want to call normal | ||
688 | * preempt_enable, but preempt_enable_no_resched instead. | ||
689 | */ | ||
690 | static inline void ftrace_preempt_enable(int resched) | ||
691 | { | ||
692 | if (resched) | ||
693 | preempt_enable_no_resched_notrace(); | ||
694 | else | ||
695 | preempt_enable_notrace(); | ||
696 | } | ||
697 | |||
698 | #ifdef CONFIG_BRANCH_TRACER | 616 | #ifdef CONFIG_BRANCH_TRACER |
699 | extern int enable_branch_tracing(struct trace_array *tr); | 617 | extern int enable_branch_tracing(struct trace_array *tr); |
700 | extern void disable_branch_tracing(void); | 618 | extern void disable_branch_tracing(void); |
@@ -785,6 +703,8 @@ struct filter_pred { | |||
785 | int pop_n; | 703 | int pop_n; |
786 | }; | 704 | }; |
787 | 705 | ||
706 | extern struct list_head ftrace_common_fields; | ||
707 | |||
788 | extern enum regex_type | 708 | extern enum regex_type |
789 | filter_parse_regex(char *buff, int len, char **search, int *not); | 709 | filter_parse_regex(char *buff, int len, char **search, int *not); |
790 | extern void print_event_filter(struct ftrace_event_call *call, | 710 | extern void print_event_filter(struct ftrace_event_call *call, |
@@ -814,6 +734,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, | |||
814 | return 0; | 734 | return 0; |
815 | } | 735 | } |
816 | 736 | ||
737 | extern void trace_event_enable_cmd_record(bool enable); | ||
738 | |||
817 | extern struct mutex event_mutex; | 739 | extern struct mutex event_mutex; |
818 | extern struct list_head ftrace_events; | 740 | extern struct list_head ftrace_events; |
819 | 741 | ||
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c deleted file mode 100644 index c21d5f3956ad..000000000000 --- a/kernel/trace/trace_boot.c +++ /dev/null | |||
@@ -1,185 +0,0 @@ | |||
1 | /* | ||
2 | * ring buffer based initcalls tracer | ||
3 | * | ||
4 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/ftrace.h> | ||
11 | #include <linux/kallsyms.h> | ||
12 | #include <linux/time.h> | ||
13 | |||
14 | #include "trace.h" | ||
15 | #include "trace_output.h" | ||
16 | |||
17 | static struct trace_array *boot_trace; | ||
18 | static bool pre_initcalls_finished; | ||
19 | |||
20 | /* Tells the boot tracer that the pre_smp_initcalls are finished. | ||
21 | * So we are ready . | ||
22 | * It doesn't enable sched events tracing however. | ||
23 | * You have to call enable_boot_trace to do so. | ||
24 | */ | ||
25 | void start_boot_trace(void) | ||
26 | { | ||
27 | pre_initcalls_finished = true; | ||
28 | } | ||
29 | |||
30 | void enable_boot_trace(void) | ||
31 | { | ||
32 | if (boot_trace && pre_initcalls_finished) | ||
33 | tracing_start_sched_switch_record(); | ||
34 | } | ||
35 | |||
36 | void disable_boot_trace(void) | ||
37 | { | ||
38 | if (boot_trace && pre_initcalls_finished) | ||
39 | tracing_stop_sched_switch_record(); | ||
40 | } | ||
41 | |||
42 | static int boot_trace_init(struct trace_array *tr) | ||
43 | { | ||
44 | boot_trace = tr; | ||
45 | |||
46 | if (!tr) | ||
47 | return 0; | ||
48 | |||
49 | tracing_reset_online_cpus(tr); | ||
50 | |||
51 | tracing_sched_switch_assign_trace(tr); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static enum print_line_t | ||
56 | initcall_call_print_line(struct trace_iterator *iter) | ||
57 | { | ||
58 | struct trace_entry *entry = iter->ent; | ||
59 | struct trace_seq *s = &iter->seq; | ||
60 | struct trace_boot_call *field; | ||
61 | struct boot_trace_call *call; | ||
62 | u64 ts; | ||
63 | unsigned long nsec_rem; | ||
64 | int ret; | ||
65 | |||
66 | trace_assign_type(field, entry); | ||
67 | call = &field->boot_call; | ||
68 | ts = iter->ts; | ||
69 | nsec_rem = do_div(ts, NSEC_PER_SEC); | ||
70 | |||
71 | ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n", | ||
72 | (unsigned long)ts, nsec_rem, call->func, call->caller); | ||
73 | |||
74 | if (!ret) | ||
75 | return TRACE_TYPE_PARTIAL_LINE; | ||
76 | else | ||
77 | return TRACE_TYPE_HANDLED; | ||
78 | } | ||
79 | |||
80 | static enum print_line_t | ||
81 | initcall_ret_print_line(struct trace_iterator *iter) | ||
82 | { | ||
83 | struct trace_entry *entry = iter->ent; | ||
84 | struct trace_seq *s = &iter->seq; | ||
85 | struct trace_boot_ret *field; | ||
86 | struct boot_trace_ret *init_ret; | ||
87 | u64 ts; | ||
88 | unsigned long nsec_rem; | ||
89 | int ret; | ||
90 | |||
91 | trace_assign_type(field, entry); | ||
92 | init_ret = &field->boot_ret; | ||
93 | ts = iter->ts; | ||
94 | nsec_rem = do_div(ts, NSEC_PER_SEC); | ||
95 | |||
96 | ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s " | ||
97 | "returned %d after %llu msecs\n", | ||
98 | (unsigned long) ts, | ||
99 | nsec_rem, | ||
100 | init_ret->func, init_ret->result, init_ret->duration); | ||
101 | |||
102 | if (!ret) | ||
103 | return TRACE_TYPE_PARTIAL_LINE; | ||
104 | else | ||
105 | return TRACE_TYPE_HANDLED; | ||
106 | } | ||
107 | |||
108 | static enum print_line_t initcall_print_line(struct trace_iterator *iter) | ||
109 | { | ||
110 | struct trace_entry *entry = iter->ent; | ||
111 | |||
112 | switch (entry->type) { | ||
113 | case TRACE_BOOT_CALL: | ||
114 | return initcall_call_print_line(iter); | ||
115 | case TRACE_BOOT_RET: | ||
116 | return initcall_ret_print_line(iter); | ||
117 | default: | ||
118 | return TRACE_TYPE_UNHANDLED; | ||
119 | } | ||
120 | } | ||
121 | |||
122 | struct tracer boot_tracer __read_mostly = | ||
123 | { | ||
124 | .name = "initcall", | ||
125 | .init = boot_trace_init, | ||
126 | .reset = tracing_reset_online_cpus, | ||
127 | .print_line = initcall_print_line, | ||
128 | }; | ||
129 | |||
130 | void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | ||
131 | { | ||
132 | struct ftrace_event_call *call = &event_boot_call; | ||
133 | struct ring_buffer_event *event; | ||
134 | struct ring_buffer *buffer; | ||
135 | struct trace_boot_call *entry; | ||
136 | struct trace_array *tr = boot_trace; | ||
137 | |||
138 | if (!tr || !pre_initcalls_finished) | ||
139 | return; | ||
140 | |||
141 | /* Get its name now since this function could | ||
142 | * disappear because it is in the .init section. | ||
143 | */ | ||
144 | sprint_symbol(bt->func, (unsigned long)fn); | ||
145 | preempt_disable(); | ||
146 | |||
147 | buffer = tr->buffer; | ||
148 | event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_CALL, | ||
149 | sizeof(*entry), 0, 0); | ||
150 | if (!event) | ||
151 | goto out; | ||
152 | entry = ring_buffer_event_data(event); | ||
153 | entry->boot_call = *bt; | ||
154 | if (!filter_check_discard(call, entry, buffer, event)) | ||
155 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
156 | out: | ||
157 | preempt_enable(); | ||
158 | } | ||
159 | |||
160 | void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | ||
161 | { | ||
162 | struct ftrace_event_call *call = &event_boot_ret; | ||
163 | struct ring_buffer_event *event; | ||
164 | struct ring_buffer *buffer; | ||
165 | struct trace_boot_ret *entry; | ||
166 | struct trace_array *tr = boot_trace; | ||
167 | |||
168 | if (!tr || !pre_initcalls_finished) | ||
169 | return; | ||
170 | |||
171 | sprint_symbol(bt->func, (unsigned long)fn); | ||
172 | preempt_disable(); | ||
173 | |||
174 | buffer = tr->buffer; | ||
175 | event = trace_buffer_lock_reserve(buffer, TRACE_BOOT_RET, | ||
176 | sizeof(*entry), 0, 0); | ||
177 | if (!event) | ||
178 | goto out; | ||
179 | entry = ring_buffer_event_data(event); | ||
180 | entry->boot_ret = *bt; | ||
181 | if (!filter_check_discard(call, entry, buffer, event)) | ||
182 | trace_buffer_unlock_commit(buffer, event, 0, 0); | ||
183 | out: | ||
184 | preempt_enable(); | ||
185 | } | ||
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 9d589d8dcd1a..52fda6c04ac3 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c | |||
@@ -32,16 +32,15 @@ | |||
32 | u64 notrace trace_clock_local(void) | 32 | u64 notrace trace_clock_local(void) |
33 | { | 33 | { |
34 | u64 clock; | 34 | u64 clock; |
35 | int resched; | ||
36 | 35 | ||
37 | /* | 36 | /* |
38 | * sched_clock() is an architecture implemented, fast, scalable, | 37 | * sched_clock() is an architecture implemented, fast, scalable, |
39 | * lockless clock. It is not guaranteed to be coherent across | 38 | * lockless clock. It is not guaranteed to be coherent across |
40 | * CPUs, nor across CPU idle events. | 39 | * CPUs, nor across CPU idle events. |
41 | */ | 40 | */ |
42 | resched = ftrace_preempt_disable(); | 41 | preempt_disable_notrace(); |
43 | clock = sched_clock(); | 42 | clock = sched_clock(); |
44 | ftrace_preempt_enable(resched); | 43 | preempt_enable_notrace(); |
45 | 44 | ||
46 | return clock; | 45 | return clock; |
47 | } | 46 | } |
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index dc008c1240da..e3dfecaf13e6 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h | |||
@@ -151,23 +151,6 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry, | |||
151 | ); | 151 | ); |
152 | 152 | ||
153 | /* | 153 | /* |
154 | * Special (free-form) trace entry: | ||
155 | */ | ||
156 | FTRACE_ENTRY(special, special_entry, | ||
157 | |||
158 | TRACE_SPECIAL, | ||
159 | |||
160 | F_STRUCT( | ||
161 | __field( unsigned long, arg1 ) | ||
162 | __field( unsigned long, arg2 ) | ||
163 | __field( unsigned long, arg3 ) | ||
164 | ), | ||
165 | |||
166 | F_printk("(%08lx) (%08lx) (%08lx)", | ||
167 | __entry->arg1, __entry->arg2, __entry->arg3) | ||
168 | ); | ||
169 | |||
170 | /* | ||
171 | * Stack-trace entry: | 154 | * Stack-trace entry: |
172 | */ | 155 | */ |
173 | 156 | ||
@@ -271,33 +254,6 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map, | |||
271 | __entry->map_id, __entry->opcode) | 254 | __entry->map_id, __entry->opcode) |
272 | ); | 255 | ); |
273 | 256 | ||
274 | FTRACE_ENTRY(boot_call, trace_boot_call, | ||
275 | |||
276 | TRACE_BOOT_CALL, | ||
277 | |||
278 | F_STRUCT( | ||
279 | __field_struct( struct boot_trace_call, boot_call ) | ||
280 | __field_desc( pid_t, boot_call, caller ) | ||
281 | __array_desc( char, boot_call, func, KSYM_SYMBOL_LEN) | ||
282 | ), | ||
283 | |||
284 | F_printk("%d %s", __entry->caller, __entry->func) | ||
285 | ); | ||
286 | |||
287 | FTRACE_ENTRY(boot_ret, trace_boot_ret, | ||
288 | |||
289 | TRACE_BOOT_RET, | ||
290 | |||
291 | F_STRUCT( | ||
292 | __field_struct( struct boot_trace_ret, boot_ret ) | ||
293 | __array_desc( char, boot_ret, func, KSYM_SYMBOL_LEN) | ||
294 | __field_desc( int, boot_ret, result ) | ||
295 | __field_desc( unsigned long, boot_ret, duration ) | ||
296 | ), | ||
297 | |||
298 | F_printk("%s %d %lx", | ||
299 | __entry->func, __entry->result, __entry->duration) | ||
300 | ); | ||
301 | 257 | ||
302 | #define TRACE_FUNC_SIZE 30 | 258 | #define TRACE_FUNC_SIZE 30 |
303 | #define TRACE_FILE_SIZE 20 | 259 | #define TRACE_FILE_SIZE 20 |
@@ -318,53 +274,3 @@ FTRACE_ENTRY(branch, trace_branch, | |||
318 | __entry->func, __entry->file, __entry->correct) | 274 | __entry->func, __entry->file, __entry->correct) |
319 | ); | 275 | ); |
320 | 276 | ||
321 | FTRACE_ENTRY(kmem_alloc, kmemtrace_alloc_entry, | ||
322 | |||
323 | TRACE_KMEM_ALLOC, | ||
324 | |||
325 | F_STRUCT( | ||
326 | __field( enum kmemtrace_type_id, type_id ) | ||
327 | __field( unsigned long, call_site ) | ||
328 | __field( const void *, ptr ) | ||
329 | __field( size_t, bytes_req ) | ||
330 | __field( size_t, bytes_alloc ) | ||
331 | __field( gfp_t, gfp_flags ) | ||
332 | __field( int, node ) | ||
333 | ), | ||
334 | |||
335 | F_printk("type:%u call_site:%lx ptr:%p req:%zi alloc:%zi" | ||
336 | " flags:%x node:%d", | ||
337 | __entry->type_id, __entry->call_site, __entry->ptr, | ||
338 | __entry->bytes_req, __entry->bytes_alloc, | ||
339 | __entry->gfp_flags, __entry->node) | ||
340 | ); | ||
341 | |||
342 | FTRACE_ENTRY(kmem_free, kmemtrace_free_entry, | ||
343 | |||
344 | TRACE_KMEM_FREE, | ||
345 | |||
346 | F_STRUCT( | ||
347 | __field( enum kmemtrace_type_id, type_id ) | ||
348 | __field( unsigned long, call_site ) | ||
349 | __field( const void *, ptr ) | ||
350 | ), | ||
351 | |||
352 | F_printk("type:%u call_site:%lx ptr:%p", | ||
353 | __entry->type_id, __entry->call_site, __entry->ptr) | ||
354 | ); | ||
355 | |||
356 | FTRACE_ENTRY(ksym_trace, ksym_trace_entry, | ||
357 | |||
358 | TRACE_KSYM, | ||
359 | |||
360 | F_STRUCT( | ||
361 | __field( unsigned long, ip ) | ||
362 | __field( unsigned char, type ) | ||
363 | __array( char , cmd, TASK_COMM_LEN ) | ||
364 | __field( unsigned long, addr ) | ||
365 | ), | ||
366 | |||
367 | F_printk("ip: %pF type: %d ksym_name: %pS cmd: %s", | ||
368 | (void *)__entry->ip, (unsigned int)__entry->type, | ||
369 | (void *)__entry->addr, __entry->cmd) | ||
370 | ); | ||
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c index 8a2b73f7c068..000e6e85b445 100644 --- a/kernel/trace/trace_event_perf.c +++ b/kernel/trace/trace_event_perf.c | |||
@@ -9,8 +9,6 @@ | |||
9 | #include <linux/kprobes.h> | 9 | #include <linux/kprobes.h> |
10 | #include "trace.h" | 10 | #include "trace.h" |
11 | 11 | ||
12 | EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs); | ||
13 | |||
14 | static char *perf_trace_buf[4]; | 12 | static char *perf_trace_buf[4]; |
15 | 13 | ||
16 | /* | 14 | /* |
@@ -56,13 +54,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event, | |||
56 | } | 54 | } |
57 | } | 55 | } |
58 | 56 | ||
59 | if (tp_event->class->reg) | 57 | ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER); |
60 | ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER); | ||
61 | else | ||
62 | ret = tracepoint_probe_register(tp_event->name, | ||
63 | tp_event->class->perf_probe, | ||
64 | tp_event); | ||
65 | |||
66 | if (ret) | 58 | if (ret) |
67 | goto fail; | 59 | goto fail; |
68 | 60 | ||
@@ -96,9 +88,7 @@ int perf_trace_init(struct perf_event *p_event) | |||
96 | mutex_lock(&event_mutex); | 88 | mutex_lock(&event_mutex); |
97 | list_for_each_entry(tp_event, &ftrace_events, list) { | 89 | list_for_each_entry(tp_event, &ftrace_events, list) { |
98 | if (tp_event->event.type == event_id && | 90 | if (tp_event->event.type == event_id && |
99 | tp_event->class && | 91 | tp_event->class && tp_event->class->reg && |
100 | (tp_event->class->perf_probe || | ||
101 | tp_event->class->reg) && | ||
102 | try_module_get(tp_event->mod)) { | 92 | try_module_get(tp_event->mod)) { |
103 | ret = perf_trace_event_init(tp_event, p_event); | 93 | ret = perf_trace_event_init(tp_event, p_event); |
104 | break; | 94 | break; |
@@ -138,18 +128,13 @@ void perf_trace_destroy(struct perf_event *p_event) | |||
138 | if (--tp_event->perf_refcount > 0) | 128 | if (--tp_event->perf_refcount > 0) |
139 | goto out; | 129 | goto out; |
140 | 130 | ||
141 | if (tp_event->class->reg) | 131 | tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); |
142 | tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER); | ||
143 | else | ||
144 | tracepoint_probe_unregister(tp_event->name, | ||
145 | tp_event->class->perf_probe, | ||
146 | tp_event); | ||
147 | 132 | ||
148 | /* | 133 | /* |
149 | * Ensure our callback won't be called anymore. See | 134 | * Ensure our callback won't be called anymore. The buffers |
150 | * tracepoint_probe_unregister() and __DO_TRACE(). | 135 | * will be freed after that. |
151 | */ | 136 | */ |
152 | synchronize_sched(); | 137 | tracepoint_synchronize_unregister(); |
153 | 138 | ||
154 | free_percpu(tp_event->perf_events); | 139 | free_percpu(tp_event->perf_events); |
155 | tp_event->perf_events = NULL; | 140 | tp_event->perf_events = NULL; |
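With the fallback gone, the perf glue relies on every event class providing a ->reg() callback; the shared implementation, ftrace_event_reg(), is added in kernel/trace/trace_events.c below and simply dispatches the four register/unregister cases to the tracepoint API. A sketch of an event class wired up that way; the class and its probes are invented, and the extern mirrors the definition added below:

#include "trace.h"

/* Common register helper added in trace_events.c in this series. */
extern int ftrace_event_reg(struct ftrace_event_call *call,
			    enum trace_reg type);

/* Invented probe bodies; real probes also take the tracepoint args. */
static void demo_probe(void *data)
{
}

#ifdef CONFIG_PERF_EVENTS
static void demo_perf_probe(void *data)
{
}
#endif

static struct ftrace_event_class demo_event_class = {
	.system		= "demo",
	.reg		= ftrace_event_reg,
	.probe		= demo_probe,
#ifdef CONFIG_PERF_EVENTS
	.perf_probe	= demo_perf_probe,
#endif
};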
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 53cffc0b0801..09b4fa6e4d3b 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -28,6 +28,7 @@ | |||
28 | DEFINE_MUTEX(event_mutex); | 28 | DEFINE_MUTEX(event_mutex); |
29 | 29 | ||
30 | LIST_HEAD(ftrace_events); | 30 | LIST_HEAD(ftrace_events); |
31 | LIST_HEAD(ftrace_common_fields); | ||
31 | 32 | ||
32 | struct list_head * | 33 | struct list_head * |
33 | trace_get_fields(struct ftrace_event_call *event_call) | 34 | trace_get_fields(struct ftrace_event_call *event_call) |
@@ -37,15 +38,11 @@ trace_get_fields(struct ftrace_event_call *event_call) | |||
37 | return event_call->class->get_fields(event_call); | 38 | return event_call->class->get_fields(event_call); |
38 | } | 39 | } |
39 | 40 | ||
40 | int trace_define_field(struct ftrace_event_call *call, const char *type, | 41 | static int __trace_define_field(struct list_head *head, const char *type, |
41 | const char *name, int offset, int size, int is_signed, | 42 | const char *name, int offset, int size, |
42 | int filter_type) | 43 | int is_signed, int filter_type) |
43 | { | 44 | { |
44 | struct ftrace_event_field *field; | 45 | struct ftrace_event_field *field; |
45 | struct list_head *head; | ||
46 | |||
47 | if (WARN_ON(!call->class)) | ||
48 | return 0; | ||
49 | 46 | ||
50 | field = kzalloc(sizeof(*field), GFP_KERNEL); | 47 | field = kzalloc(sizeof(*field), GFP_KERNEL); |
51 | if (!field) | 48 | if (!field) |
@@ -68,7 +65,6 @@ int trace_define_field(struct ftrace_event_call *call, const char *type, | |||
68 | field->size = size; | 65 | field->size = size; |
69 | field->is_signed = is_signed; | 66 | field->is_signed = is_signed; |
70 | 67 | ||
71 | head = trace_get_fields(call); | ||
72 | list_add(&field->link, head); | 68 | list_add(&field->link, head); |
73 | 69 | ||
74 | return 0; | 70 | return 0; |
@@ -80,17 +76,32 @@ err: | |||
80 | 76 | ||
81 | return -ENOMEM; | 77 | return -ENOMEM; |
82 | } | 78 | } |
79 | |||
80 | int trace_define_field(struct ftrace_event_call *call, const char *type, | ||
81 | const char *name, int offset, int size, int is_signed, | ||
82 | int filter_type) | ||
83 | { | ||
84 | struct list_head *head; | ||
85 | |||
86 | if (WARN_ON(!call->class)) | ||
87 | return 0; | ||
88 | |||
89 | head = trace_get_fields(call); | ||
90 | return __trace_define_field(head, type, name, offset, size, | ||
91 | is_signed, filter_type); | ||
92 | } | ||
83 | EXPORT_SYMBOL_GPL(trace_define_field); | 93 | EXPORT_SYMBOL_GPL(trace_define_field); |
84 | 94 | ||
85 | #define __common_field(type, item) \ | 95 | #define __common_field(type, item) \ |
86 | ret = trace_define_field(call, #type, "common_" #item, \ | 96 | ret = __trace_define_field(&ftrace_common_fields, #type, \ |
87 | offsetof(typeof(ent), item), \ | 97 | "common_" #item, \ |
88 | sizeof(ent.item), \ | 98 | offsetof(typeof(ent), item), \ |
89 | is_signed_type(type), FILTER_OTHER); \ | 99 | sizeof(ent.item), \ |
100 | is_signed_type(type), FILTER_OTHER); \ | ||
90 | if (ret) \ | 101 | if (ret) \ |
91 | return ret; | 102 | return ret; |
92 | 103 | ||
93 | static int trace_define_common_fields(struct ftrace_event_call *call) | 104 | static int trace_define_common_fields(void) |
94 | { | 105 | { |
95 | int ret; | 106 | int ret; |
96 | struct trace_entry ent; | 107 | struct trace_entry ent; |
@@ -130,6 +141,55 @@ int trace_event_raw_init(struct ftrace_event_call *call) | |||
130 | } | 141 | } |
131 | EXPORT_SYMBOL_GPL(trace_event_raw_init); | 142 | EXPORT_SYMBOL_GPL(trace_event_raw_init); |
132 | 143 | ||
144 | int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type) | ||
145 | { | ||
146 | switch (type) { | ||
147 | case TRACE_REG_REGISTER: | ||
148 | return tracepoint_probe_register(call->name, | ||
149 | call->class->probe, | ||
150 | call); | ||
151 | case TRACE_REG_UNREGISTER: | ||
152 | tracepoint_probe_unregister(call->name, | ||
153 | call->class->probe, | ||
154 | call); | ||
155 | return 0; | ||
156 | |||
157 | #ifdef CONFIG_PERF_EVENTS | ||
158 | case TRACE_REG_PERF_REGISTER: | ||
159 | return tracepoint_probe_register(call->name, | ||
160 | call->class->perf_probe, | ||
161 | call); | ||
162 | case TRACE_REG_PERF_UNREGISTER: | ||
163 | tracepoint_probe_unregister(call->name, | ||
164 | call->class->perf_probe, | ||
165 | call); | ||
166 | return 0; | ||
167 | #endif | ||
168 | } | ||
169 | return 0; | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(ftrace_event_reg); | ||
172 | |||
173 | void trace_event_enable_cmd_record(bool enable) | ||
174 | { | ||
175 | struct ftrace_event_call *call; | ||
176 | |||
177 | mutex_lock(&event_mutex); | ||
178 | list_for_each_entry(call, &ftrace_events, list) { | ||
179 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) | ||
180 | continue; | ||
181 | |||
182 | if (enable) { | ||
183 | tracing_start_cmdline_record(); | ||
184 | call->flags |= TRACE_EVENT_FL_RECORDED_CMD; | ||
185 | } else { | ||
186 | tracing_stop_cmdline_record(); | ||
187 | call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; | ||
188 | } | ||
189 | } | ||
190 | mutex_unlock(&event_mutex); | ||
191 | } | ||
192 | |||
133 | static int ftrace_event_enable_disable(struct ftrace_event_call *call, | 193 | static int ftrace_event_enable_disable(struct ftrace_event_call *call, |
134 | int enable) | 194 | int enable) |
135 | { | 195 | { |
@@ -139,24 +199,20 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call, | |||
139 | case 0: | 199 | case 0: |
140 | if (call->flags & TRACE_EVENT_FL_ENABLED) { | 200 | if (call->flags & TRACE_EVENT_FL_ENABLED) { |
141 | call->flags &= ~TRACE_EVENT_FL_ENABLED; | 201 | call->flags &= ~TRACE_EVENT_FL_ENABLED; |
142 | tracing_stop_cmdline_record(); | 202 | if (call->flags & TRACE_EVENT_FL_RECORDED_CMD) { |
143 | if (call->class->reg) | 203 | tracing_stop_cmdline_record(); |
144 | call->class->reg(call, TRACE_REG_UNREGISTER); | 204 | call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD; |
145 | else | 205 | } |
146 | tracepoint_probe_unregister(call->name, | 206 | call->class->reg(call, TRACE_REG_UNREGISTER); |
147 | call->class->probe, | ||
148 | call); | ||
149 | } | 207 | } |
150 | break; | 208 | break; |
151 | case 1: | 209 | case 1: |
152 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { | 210 | if (!(call->flags & TRACE_EVENT_FL_ENABLED)) { |
153 | tracing_start_cmdline_record(); | 211 | if (trace_flags & TRACE_ITER_RECORD_CMD) { |
154 | if (call->class->reg) | 212 | tracing_start_cmdline_record(); |
155 | ret = call->class->reg(call, TRACE_REG_REGISTER); | 213 | call->flags |= TRACE_EVENT_FL_RECORDED_CMD; |
156 | else | 214 | } |
157 | ret = tracepoint_probe_register(call->name, | 215 | ret = call->class->reg(call, TRACE_REG_REGISTER); |
158 | call->class->probe, | ||
159 | call); | ||
160 | if (ret) { | 216 | if (ret) { |
161 | tracing_stop_cmdline_record(); | 217 | tracing_stop_cmdline_record(); |
162 | pr_info("event trace: Could not enable event " | 218 | pr_info("event trace: Could not enable event " |
@@ -194,8 +250,7 @@ static int __ftrace_set_clr_event(const char *match, const char *sub, | |||
194 | mutex_lock(&event_mutex); | 250 | mutex_lock(&event_mutex); |
195 | list_for_each_entry(call, &ftrace_events, list) { | 251 | list_for_each_entry(call, &ftrace_events, list) { |
196 | 252 | ||
197 | if (!call->name || !call->class || | 253 | if (!call->name || !call->class || !call->class->reg) |
198 | (!call->class->probe && !call->class->reg)) | ||
199 | continue; | 254 | continue; |
200 | 255 | ||
201 | if (match && | 256 | if (match && |
@@ -321,7 +376,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
321 | * The ftrace subsystem is for showing formats only. | 376 | * The ftrace subsystem is for showing formats only. |
322 | * They can not be enabled or disabled via the event files. | 377 | * They can not be enabled or disabled via the event files. |
323 | */ | 378 | */ |
324 | if (call->class && (call->class->probe || call->class->reg)) | 379 | if (call->class && call->class->reg) |
325 | return call; | 380 | return call; |
326 | } | 381 | } |
327 | 382 | ||
@@ -474,8 +529,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
474 | 529 | ||
475 | mutex_lock(&event_mutex); | 530 | mutex_lock(&event_mutex); |
476 | list_for_each_entry(call, &ftrace_events, list) { | 531 | list_for_each_entry(call, &ftrace_events, list) { |
477 | if (!call->name || !call->class || | 532 | if (!call->name || !call->class || !call->class->reg) |
478 | (!call->class->probe && !call->class->reg)) | ||
479 | continue; | 533 | continue; |
480 | 534 | ||
481 | if (system && strcmp(call->class->system, system) != 0) | 535 | if (system && strcmp(call->class->system, system) != 0) |
@@ -544,32 +598,10 @@ out: | |||
544 | return ret; | 598 | return ret; |
545 | } | 599 | } |
546 | 600 | ||
547 | static ssize_t | 601 | static void print_event_fields(struct trace_seq *s, struct list_head *head) |
548 | event_format_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
549 | loff_t *ppos) | ||
550 | { | 602 | { |
551 | struct ftrace_event_call *call = filp->private_data; | ||
552 | struct ftrace_event_field *field; | 603 | struct ftrace_event_field *field; |
553 | struct list_head *head; | ||
554 | struct trace_seq *s; | ||
555 | int common_field_count = 5; | ||
556 | char *buf; | ||
557 | int r = 0; | ||
558 | |||
559 | if (*ppos) | ||
560 | return 0; | ||
561 | |||
562 | s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
563 | if (!s) | ||
564 | return -ENOMEM; | ||
565 | |||
566 | trace_seq_init(s); | ||
567 | |||
568 | trace_seq_printf(s, "name: %s\n", call->name); | ||
569 | trace_seq_printf(s, "ID: %d\n", call->event.type); | ||
570 | trace_seq_printf(s, "format:\n"); | ||
571 | 604 | ||
572 | head = trace_get_fields(call); | ||
573 | list_for_each_entry_reverse(field, head, link) { | 605 | list_for_each_entry_reverse(field, head, link) { |
574 | /* | 606 | /* |
575 | * Smartly shows the array type(except dynamic array). | 607 | * Smartly shows the array type(except dynamic array). |
@@ -584,29 +616,54 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
584 | array_descriptor = NULL; | 616 | array_descriptor = NULL; |
585 | 617 | ||
586 | if (!array_descriptor) { | 618 | if (!array_descriptor) { |
587 | r = trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" | 619 | trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;" |
588 | "\tsize:%u;\tsigned:%d;\n", | 620 | "\tsize:%u;\tsigned:%d;\n", |
589 | field->type, field->name, field->offset, | 621 | field->type, field->name, field->offset, |
590 | field->size, !!field->is_signed); | 622 | field->size, !!field->is_signed); |
591 | } else { | 623 | } else { |
592 | r = trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" | 624 | trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;" |
593 | "\tsize:%u;\tsigned:%d;\n", | 625 | "\tsize:%u;\tsigned:%d;\n", |
594 | (int)(array_descriptor - field->type), | 626 | (int)(array_descriptor - field->type), |
595 | field->type, field->name, | 627 | field->type, field->name, |
596 | array_descriptor, field->offset, | 628 | array_descriptor, field->offset, |
597 | field->size, !!field->is_signed); | 629 | field->size, !!field->is_signed); |
598 | } | 630 | } |
631 | } | ||
632 | } | ||
599 | 633 | ||
600 | if (--common_field_count == 0) | 634 | static ssize_t |
601 | r = trace_seq_printf(s, "\n"); | 635 | event_format_read(struct file *filp, char __user *ubuf, size_t cnt, |
636 | loff_t *ppos) | ||
637 | { | ||
638 | struct ftrace_event_call *call = filp->private_data; | ||
639 | struct list_head *head; | ||
640 | struct trace_seq *s; | ||
641 | char *buf; | ||
642 | int r; | ||
602 | 643 | ||
603 | if (!r) | 644 | if (*ppos) |
604 | break; | 645 | return 0; |
605 | } | 646 | |
647 | s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
648 | if (!s) | ||
649 | return -ENOMEM; | ||
650 | |||
651 | trace_seq_init(s); | ||
652 | |||
653 | trace_seq_printf(s, "name: %s\n", call->name); | ||
654 | trace_seq_printf(s, "ID: %d\n", call->event.type); | ||
655 | trace_seq_printf(s, "format:\n"); | ||
656 | |||
657 | /* print common fields */ | ||
658 | print_event_fields(s, &ftrace_common_fields); | ||
606 | 659 | ||
607 | if (r) | 660 | trace_seq_putc(s, '\n'); |
608 | r = trace_seq_printf(s, "\nprint fmt: %s\n", | 661 | |
609 | call->print_fmt); | 662 | /* print event specific fields */ |
663 | head = trace_get_fields(call); | ||
664 | print_event_fields(s, head); | ||
665 | |||
666 | r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt); | ||
610 | 667 | ||
611 | if (!r) { | 668 | if (!r) { |
612 | /* | 669 | /* |
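The resulting format file is unchanged by this refactor; its first block is now simply sourced from the shared ftrace_common_fields list and the rest from the event's own list, with the new trace_seq_putc(s, '\n') supplying the separating blank line. Schematically (the concrete types, offsets and sizes vary per event and are only illustrative here):

name: <event name>
ID: <event id>
format:
	field:unsigned short common_type;	offset:0;	size:2;	signed:0;
	field:unsigned char common_flags;	offset:2;	size:1;	signed:0;
	... remaining common_* fields ...

	field:<type> <name>;	offset:<N>;	size:<M>;	signed:<0 or 1>;
	... event-specific fields ...

print fmt: <print_fmt string>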
@@ -963,35 +1020,31 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
963 | return -1; | 1020 | return -1; |
964 | } | 1021 | } |
965 | 1022 | ||
966 | if (call->class->probe || call->class->reg) | 1023 | if (call->class->reg) |
967 | trace_create_file("enable", 0644, call->dir, call, | 1024 | trace_create_file("enable", 0644, call->dir, call, |
968 | enable); | 1025 | enable); |
969 | 1026 | ||
970 | #ifdef CONFIG_PERF_EVENTS | 1027 | #ifdef CONFIG_PERF_EVENTS |
971 | if (call->event.type && (call->class->perf_probe || call->class->reg)) | 1028 | if (call->event.type && call->class->reg) |
972 | trace_create_file("id", 0444, call->dir, call, | 1029 | trace_create_file("id", 0444, call->dir, call, |
973 | id); | 1030 | id); |
974 | #endif | 1031 | #endif |
975 | 1032 | ||
976 | if (call->class->define_fields) { | 1033 | /* |
977 | /* | 1034 | * Other events may have the same class. Only update |
978 | * Other events may have the same class. Only update | 1035 | * the fields if they are not already defined. |
979 | * the fields if they are not already defined. | 1036 | */ |
980 | */ | 1037 | head = trace_get_fields(call); |
981 | head = trace_get_fields(call); | 1038 | if (list_empty(head)) { |
982 | if (list_empty(head)) { | 1039 | ret = call->class->define_fields(call); |
983 | ret = trace_define_common_fields(call); | 1040 | if (ret < 0) { |
984 | if (!ret) | 1041 | pr_warning("Could not initialize trace point" |
985 | ret = call->class->define_fields(call); | 1042 | " events/%s\n", call->name); |
986 | if (ret < 0) { | 1043 | return ret; |
987 | pr_warning("Could not initialize trace point" | ||
988 | " events/%s\n", call->name); | ||
989 | return ret; | ||
990 | } | ||
991 | } | 1044 | } |
992 | trace_create_file("filter", 0644, call->dir, call, | ||
993 | filter); | ||
994 | } | 1045 | } |
1046 | trace_create_file("filter", 0644, call->dir, call, | ||
1047 | filter); | ||
995 | 1048 | ||
996 | trace_create_file("format", 0444, call->dir, call, | 1049 | trace_create_file("format", 0444, call->dir, call, |
997 | format); | 1050 | format); |
@@ -999,11 +1052,17 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, | |||
999 | return 0; | 1052 | return 0; |
1000 | } | 1053 | } |
1001 | 1054 | ||
1002 | static int __trace_add_event_call(struct ftrace_event_call *call) | 1055 | static int |
1056 | __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, | ||
1057 | const struct file_operations *id, | ||
1058 | const struct file_operations *enable, | ||
1059 | const struct file_operations *filter, | ||
1060 | const struct file_operations *format) | ||
1003 | { | 1061 | { |
1004 | struct dentry *d_events; | 1062 | struct dentry *d_events; |
1005 | int ret; | 1063 | int ret; |
1006 | 1064 | ||
1065 | /* The linker may leave blanks */ | ||
1007 | if (!call->name) | 1066 | if (!call->name) |
1008 | return -EINVAL; | 1067 | return -EINVAL; |
1009 | 1068 | ||
@@ -1011,8 +1070,8 @@ static int __trace_add_event_call(struct ftrace_event_call *call) | |||
1011 | ret = call->class->raw_init(call); | 1070 | ret = call->class->raw_init(call); |
1012 | if (ret < 0) { | 1071 | if (ret < 0) { |
1013 | if (ret != -ENOSYS) | 1072 | if (ret != -ENOSYS) |
1014 | pr_warning("Could not initialize trace " | 1073 | pr_warning("Could not initialize trace events/%s\n", |
1015 | "events/%s\n", call->name); | 1074 | call->name); |
1016 | return ret; | 1075 | return ret; |
1017 | } | 1076 | } |
1018 | } | 1077 | } |
@@ -1021,11 +1080,10 @@ static int __trace_add_event_call(struct ftrace_event_call *call) | |||
1021 | if (!d_events) | 1080 | if (!d_events) |
1022 | return -ENOENT; | 1081 | return -ENOENT; |
1023 | 1082 | ||
1024 | ret = event_create_dir(call, d_events, &ftrace_event_id_fops, | 1083 | ret = event_create_dir(call, d_events, id, enable, filter, format); |
1025 | &ftrace_enable_fops, &ftrace_event_filter_fops, | ||
1026 | &ftrace_event_format_fops); | ||
1027 | if (!ret) | 1084 | if (!ret) |
1028 | list_add(&call->list, &ftrace_events); | 1085 | list_add(&call->list, &ftrace_events); |
1086 | call->mod = mod; | ||
1029 | 1087 | ||
1030 | return ret; | 1088 | return ret; |
1031 | } | 1089 | } |
@@ -1035,7 +1093,10 @@ int trace_add_event_call(struct ftrace_event_call *call) | |||
1035 | { | 1093 | { |
1036 | int ret; | 1094 | int ret; |
1037 | mutex_lock(&event_mutex); | 1095 | mutex_lock(&event_mutex); |
1038 | ret = __trace_add_event_call(call); | 1096 | ret = __trace_add_event_call(call, NULL, &ftrace_event_id_fops, |
1097 | &ftrace_enable_fops, | ||
1098 | &ftrace_event_filter_fops, | ||
1099 | &ftrace_event_format_fops); | ||
1039 | mutex_unlock(&event_mutex); | 1100 | mutex_unlock(&event_mutex); |
1040 | return ret; | 1101 | return ret; |
1041 | } | 1102 | } |
@@ -1152,8 +1213,6 @@ static void trace_module_add_events(struct module *mod) | |||
1152 | { | 1213 | { |
1153 | struct ftrace_module_file_ops *file_ops = NULL; | 1214 | struct ftrace_module_file_ops *file_ops = NULL; |
1154 | struct ftrace_event_call *call, *start, *end; | 1215 | struct ftrace_event_call *call, *start, *end; |
1155 | struct dentry *d_events; | ||
1156 | int ret; | ||
1157 | 1216 | ||
1158 | start = mod->trace_events; | 1217 | start = mod->trace_events; |
1159 | end = mod->trace_events + mod->num_trace_events; | 1218 | end = mod->trace_events + mod->num_trace_events; |
@@ -1161,38 +1220,14 @@ static void trace_module_add_events(struct module *mod) | |||
1161 | if (start == end) | 1220 | if (start == end) |
1162 | return; | 1221 | return; |
1163 | 1222 | ||
1164 | d_events = event_trace_events_dir(); | 1223 | file_ops = trace_create_file_ops(mod); |
1165 | if (!d_events) | 1224 | if (!file_ops) |
1166 | return; | 1225 | return; |
1167 | 1226 | ||
1168 | for_each_event(call, start, end) { | 1227 | for_each_event(call, start, end) { |
1169 | /* The linker may leave blanks */ | 1228 | __trace_add_event_call(call, mod, |
1170 | if (!call->name) | ||
1171 | continue; | ||
1172 | if (call->class->raw_init) { | ||
1173 | ret = call->class->raw_init(call); | ||
1174 | if (ret < 0) { | ||
1175 | if (ret != -ENOSYS) | ||
1176 | pr_warning("Could not initialize trace " | ||
1177 | "point events/%s\n", call->name); | ||
1178 | continue; | ||
1179 | } | ||
1180 | } | ||
1181 | /* | ||
1182 | * This module has events, create file ops for this module | ||
1183 | * if not already done. | ||
1184 | */ | ||
1185 | if (!file_ops) { | ||
1186 | file_ops = trace_create_file_ops(mod); | ||
1187 | if (!file_ops) | ||
1188 | return; | ||
1189 | } | ||
1190 | call->mod = mod; | ||
1191 | ret = event_create_dir(call, d_events, | ||
1192 | &file_ops->id, &file_ops->enable, | 1229 | &file_ops->id, &file_ops->enable, |
1193 | &file_ops->filter, &file_ops->format); | 1230 | &file_ops->filter, &file_ops->format); |
1194 | if (!ret) | ||
1195 | list_add(&call->list, &ftrace_events); | ||
1196 | } | 1231 | } |
1197 | } | 1232 | } |
1198 | 1233 | ||
@@ -1319,25 +1354,14 @@ static __init int event_trace_init(void) | |||
1319 | trace_create_file("enable", 0644, d_events, | 1354 | trace_create_file("enable", 0644, d_events, |
1320 | NULL, &ftrace_system_enable_fops); | 1355 | NULL, &ftrace_system_enable_fops); |
1321 | 1356 | ||
1357 | if (trace_define_common_fields()) | ||
1358 | pr_warning("tracing: Failed to allocate common fields"); | ||
1359 | |||
1322 | for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { | 1360 | for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { |
1323 | /* The linker may leave blanks */ | 1361 | __trace_add_event_call(call, NULL, &ftrace_event_id_fops, |
1324 | if (!call->name) | ||
1325 | continue; | ||
1326 | if (call->class->raw_init) { | ||
1327 | ret = call->class->raw_init(call); | ||
1328 | if (ret < 0) { | ||
1329 | if (ret != -ENOSYS) | ||
1330 | pr_warning("Could not initialize trace " | ||
1331 | "point events/%s\n", call->name); | ||
1332 | continue; | ||
1333 | } | ||
1334 | } | ||
1335 | ret = event_create_dir(call, d_events, &ftrace_event_id_fops, | ||
1336 | &ftrace_enable_fops, | 1362 | &ftrace_enable_fops, |
1337 | &ftrace_event_filter_fops, | 1363 | &ftrace_event_filter_fops, |
1338 | &ftrace_event_format_fops); | 1364 | &ftrace_event_format_fops); |
1339 | if (!ret) | ||
1340 | list_add(&call->list, &ftrace_events); | ||
1341 | } | 1365 | } |
1342 | 1366 | ||
1343 | while (true) { | 1367 | while (true) { |
@@ -1524,12 +1548,11 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1524 | struct ftrace_entry *entry; | 1548 | struct ftrace_entry *entry; |
1525 | unsigned long flags; | 1549 | unsigned long flags; |
1526 | long disabled; | 1550 | long disabled; |
1527 | int resched; | ||
1528 | int cpu; | 1551 | int cpu; |
1529 | int pc; | 1552 | int pc; |
1530 | 1553 | ||
1531 | pc = preempt_count(); | 1554 | pc = preempt_count(); |
1532 | resched = ftrace_preempt_disable(); | 1555 | preempt_disable_notrace(); |
1533 | cpu = raw_smp_processor_id(); | 1556 | cpu = raw_smp_processor_id(); |
1534 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); | 1557 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); |
1535 | 1558 | ||
@@ -1551,7 +1574,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1551 | 1574 | ||
1552 | out: | 1575 | out: |
1553 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); | 1576 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
1554 | ftrace_preempt_enable(resched); | 1577 | preempt_enable_notrace(); |
1555 | } | 1578 | } |
1556 | 1579 | ||
1557 | static struct ftrace_ops trace_ops __initdata = | 1580 | static struct ftrace_ops trace_ops __initdata = |
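The new event_format_read()/print_event_fields() pair emits a per-event "format" file: the shared common fields first, then the event's own fields, then the print format. For orientation, the output has roughly the shape below; the event name, ID, field names and offsets are illustrative, not taken from this patch.

    name: sample_event
    ID: 42
    format:
            field:unsigned short common_type;       offset:0;       size:2; signed:0;
            field:unsigned char common_flags;       offset:2;       size:1; signed:0;
            field:int common_pid;                   offset:4;       size:4; signed:1;

            field:unsigned long ip;                 offset:8;       size:8; signed:0;
            field:char comm[16];                    offset:16;      size:16;        signed:1;

    print fmt: "ip=%lx comm=%s", REC->ip, REC->comm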
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 57bb1bb32999..36d40104b17f 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -497,12 +497,10 @@ void print_subsystem_event_filter(struct event_subsystem *system, | |||
497 | } | 497 | } |
498 | 498 | ||
499 | static struct ftrace_event_field * | 499 | static struct ftrace_event_field * |
500 | find_event_field(struct ftrace_event_call *call, char *name) | 500 | __find_event_field(struct list_head *head, char *name) |
501 | { | 501 | { |
502 | struct ftrace_event_field *field; | 502 | struct ftrace_event_field *field; |
503 | struct list_head *head; | ||
504 | 503 | ||
505 | head = trace_get_fields(call); | ||
506 | list_for_each_entry(field, head, link) { | 504 | list_for_each_entry(field, head, link) { |
507 | if (!strcmp(field->name, name)) | 505 | if (!strcmp(field->name, name)) |
508 | return field; | 506 | return field; |
@@ -511,6 +509,20 @@ find_event_field(struct ftrace_event_call *call, char *name) | |||
511 | return NULL; | 509 | return NULL; |
512 | } | 510 | } |
513 | 511 | ||
512 | static struct ftrace_event_field * | ||
513 | find_event_field(struct ftrace_event_call *call, char *name) | ||
514 | { | ||
515 | struct ftrace_event_field *field; | ||
516 | struct list_head *head; | ||
517 | |||
518 | field = __find_event_field(&ftrace_common_fields, name); | ||
519 | if (field) | ||
520 | return field; | ||
521 | |||
522 | head = trace_get_fields(call); | ||
523 | return __find_event_field(head, name); | ||
524 | } | ||
525 | |||
514 | static void filter_free_pred(struct filter_pred *pred) | 526 | static void filter_free_pred(struct filter_pred *pred) |
515 | { | 527 | { |
516 | if (!pred) | 528 | if (!pred) |
@@ -627,9 +639,6 @@ static int init_subsystem_preds(struct event_subsystem *system) | |||
627 | int err; | 639 | int err; |
628 | 640 | ||
629 | list_for_each_entry(call, &ftrace_events, list) { | 641 | list_for_each_entry(call, &ftrace_events, list) { |
630 | if (!call->class || !call->class->define_fields) | ||
631 | continue; | ||
632 | |||
633 | if (strcmp(call->class->system, system->name) != 0) | 642 | if (strcmp(call->class->system, system->name) != 0) |
634 | continue; | 643 | continue; |
635 | 644 | ||
@@ -646,9 +655,6 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) | |||
646 | struct ftrace_event_call *call; | 655 | struct ftrace_event_call *call; |
647 | 656 | ||
648 | list_for_each_entry(call, &ftrace_events, list) { | 657 | list_for_each_entry(call, &ftrace_events, list) { |
649 | if (!call->class || !call->class->define_fields) | ||
650 | continue; | ||
651 | |||
652 | if (strcmp(call->class->system, system->name) != 0) | 658 | if (strcmp(call->class->system, system->name) != 0) |
653 | continue; | 659 | continue; |
654 | 660 | ||
@@ -1251,9 +1257,6 @@ static int replace_system_preds(struct event_subsystem *system, | |||
1251 | list_for_each_entry(call, &ftrace_events, list) { | 1257 | list_for_each_entry(call, &ftrace_events, list) { |
1252 | struct event_filter *filter = call->filter; | 1258 | struct event_filter *filter = call->filter; |
1253 | 1259 | ||
1254 | if (!call->class || !call->class->define_fields) | ||
1255 | continue; | ||
1256 | |||
1257 | if (strcmp(call->class->system, system->name) != 0) | 1260 | if (strcmp(call->class->system, system->name) != 0) |
1258 | continue; | 1261 | continue; |
1259 | 1262 | ||
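find_event_field() now searches the shared ftrace_common_fields list before falling back to the event's own field list, so a filter on a common field resolves without every event carrying its own copy of those definitions. A minimal user-space sketch of the same two-stage lookup; the field names are made up for illustration.

    #include <stdio.h>
    #include <string.h>

    struct field { const char *name; };

    static const struct field *lookup(const struct field *tbl, int n, const char *name)
    {
            for (int i = 0; i < n; i++)
                    if (!strcmp(tbl[i].name, name))
                            return &tbl[i];
            return NULL;
    }

    int main(void)
    {
            static const struct field common[] = { {"common_type"}, {"common_pid"} };
            static const struct field event[]  = { {"prev_comm"}, {"next_pid"} };
            const char *want = "next_pid";

            const struct field *f = lookup(common, 2, want);
            if (!f)
                    f = lookup(event, 2, want);  /* fall back to the event-specific list */
            printf("%s %s\n", want, f ? "found" : "not found");
            return 0;
    }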
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c index 8536e2a65969..4ba44deaac25 100644 --- a/kernel/trace/trace_export.c +++ b/kernel/trace/trace_export.c | |||
@@ -125,12 +125,6 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \ | |||
125 | 125 | ||
126 | #include "trace_entries.h" | 126 | #include "trace_entries.h" |
127 | 127 | ||
128 | static int ftrace_raw_init_event(struct ftrace_event_call *call) | ||
129 | { | ||
130 | INIT_LIST_HEAD(&call->class->fields); | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | #undef __entry | 128 | #undef __entry |
135 | #define __entry REC | 129 | #define __entry REC |
136 | 130 | ||
@@ -158,7 +152,7 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call) | |||
158 | struct ftrace_event_class event_class_ftrace_##call = { \ | 152 | struct ftrace_event_class event_class_ftrace_##call = { \ |
159 | .system = __stringify(TRACE_SYSTEM), \ | 153 | .system = __stringify(TRACE_SYSTEM), \ |
160 | .define_fields = ftrace_define_fields_##call, \ | 154 | .define_fields = ftrace_define_fields_##call, \ |
161 | .raw_init = ftrace_raw_init_event, \ | 155 | .fields = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\ |
162 | }; \ | 156 | }; \ |
163 | \ | 157 | \ |
164 | struct ftrace_event_call __used \ | 158 | struct ftrace_event_call __used \ |
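The ftrace event classes above drop their raw_init callback: the fields list head is now initialized statically with LIST_HEAD_INIT instead of at run time with INIT_LIST_HEAD. A stand-alone sketch of the difference, assuming the usual { &(name), &(name) } expansion of LIST_HEAD_INIT rather than pulling in the kernel headers.

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }   /* assumed expansion */

    struct event_class {
            const char *system;
            struct list_head fields;
    };

    /* No ->raw_init() needed just to run INIT_LIST_HEAD(&class->fields): */
    static struct event_class example_class = {
            .system = "ftrace",
            .fields = LIST_HEAD_INIT(example_class.fields),
    };

    int main(void)
    {
            printf("fields list starts empty: %d\n",
                   example_class.fields.next == &example_class.fields);
            return 0;
    }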
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index b3f3776b0cd6..16aee4d44e8f 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -54,14 +54,14 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | |||
54 | struct trace_array_cpu *data; | 54 | struct trace_array_cpu *data; |
55 | unsigned long flags; | 55 | unsigned long flags; |
56 | long disabled; | 56 | long disabled; |
57 | int cpu, resched; | 57 | int cpu; |
58 | int pc; | 58 | int pc; |
59 | 59 | ||
60 | if (unlikely(!ftrace_function_enabled)) | 60 | if (unlikely(!ftrace_function_enabled)) |
61 | return; | 61 | return; |
62 | 62 | ||
63 | pc = preempt_count(); | 63 | pc = preempt_count(); |
64 | resched = ftrace_preempt_disable(); | 64 | preempt_disable_notrace(); |
65 | local_save_flags(flags); | 65 | local_save_flags(flags); |
66 | cpu = raw_smp_processor_id(); | 66 | cpu = raw_smp_processor_id(); |
67 | data = tr->data[cpu]; | 67 | data = tr->data[cpu]; |
@@ -71,7 +71,7 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | |||
71 | trace_function(tr, ip, parent_ip, flags, pc); | 71 | trace_function(tr, ip, parent_ip, flags, pc); |
72 | 72 | ||
73 | atomic_dec(&data->disabled); | 73 | atomic_dec(&data->disabled); |
74 | ftrace_preempt_enable(resched); | 74 | preempt_enable_notrace(); |
75 | } | 75 | } |
76 | 76 | ||
77 | static void | 77 | static void |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 79f4bac99a94..6bff23625781 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -641,7 +641,8 @@ trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
641 | 641 | ||
642 | /* Print nsecs (we don't want to exceed 7 numbers) */ | 642 | /* Print nsecs (we don't want to exceed 7 numbers) */ |
643 | if (len < 7) { | 643 | if (len < 7) { |
644 | snprintf(nsecs_str, 8 - len, "%03lu", nsecs_rem); | 644 | snprintf(nsecs_str, min(sizeof(nsecs_str), 8UL - len), "%03lu", |
645 | nsecs_rem); | ||
645 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 646 | ret = trace_seq_printf(s, ".%s", nsecs_str); |
646 | if (!ret) | 647 | if (!ret) |
647 | return TRACE_TYPE_PARTIAL_LINE; | 648 | return TRACE_TYPE_PARTIAL_LINE; |
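The one-line change above clamps the snprintf bound to sizeof(nsecs_str), so a short microseconds string can no longer turn 8 - len into a bound larger than the buffer. A user-space sketch of the formatting rule (at most seven significant digits across the usecs and nsecs parts); buffer sizes here are illustrative, not the kernel's.

    #include <stdio.h>
    #include <string.h>

    static void print_duration(unsigned long long duration_ns)
    {
            unsigned long nsecs_rem = duration_ns % 1000;
            unsigned long long usecs = duration_ns / 1000;
            char usecs_str[21];
            char nsecs_str[5];
            size_t len;

            snprintf(usecs_str, sizeof(usecs_str), "%llu", usecs);
            len = strlen(usecs_str);

            if (len < 7) {
                    /* min(sizeof(nsecs_str), 8 - len), as in the fix above */
                    size_t bound = sizeof(nsecs_str) < 8 - len ? sizeof(nsecs_str) : 8 - len;

                    snprintf(nsecs_str, bound, "%03lu", nsecs_rem);
                    printf("%s.%s us\n", usecs_str, nsecs_str);
            } else {
                    printf("%s us\n", usecs_str);
            }
    }

    int main(void)
    {
            print_duration(12345678ULL);    /* 12345.67 us -- 7 significant digits */
            print_duration(1234ULL);        /* 1.234 us */
            return 0;
    }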
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 6fd486e0cef4..73a6b0601f2e 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -649,6 +649,7 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
649 | #endif | 649 | #endif |
650 | .open = irqsoff_trace_open, | 650 | .open = irqsoff_trace_open, |
651 | .close = irqsoff_trace_close, | 651 | .close = irqsoff_trace_close, |
652 | .use_max_tr = 1, | ||
652 | }; | 653 | }; |
653 | # define register_irqsoff(trace) register_tracer(&trace) | 654 | # define register_irqsoff(trace) register_tracer(&trace) |
654 | #else | 655 | #else |
@@ -681,6 +682,7 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
681 | #endif | 682 | #endif |
682 | .open = irqsoff_trace_open, | 683 | .open = irqsoff_trace_open, |
683 | .close = irqsoff_trace_close, | 684 | .close = irqsoff_trace_close, |
685 | .use_max_tr = 1, | ||
684 | }; | 686 | }; |
685 | # define register_preemptoff(trace) register_tracer(&trace) | 687 | # define register_preemptoff(trace) register_tracer(&trace) |
686 | #else | 688 | #else |
@@ -715,6 +717,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
715 | #endif | 717 | #endif |
716 | .open = irqsoff_trace_open, | 718 | .open = irqsoff_trace_open, |
717 | .close = irqsoff_trace_close, | 719 | .close = irqsoff_trace_close, |
720 | .use_max_tr = 1, | ||
718 | }; | 721 | }; |
719 | 722 | ||
720 | # define register_preemptirqsoff(trace) register_tracer(&trace) | 723 | # define register_preemptirqsoff(trace) register_tracer(&trace) |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index f52b5f50299d..8b27c9849b42 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/ptrace.h> | 30 | #include <linux/ptrace.h> |
31 | #include <linux/perf_event.h> | 31 | #include <linux/perf_event.h> |
32 | #include <linux/stringify.h> | 32 | #include <linux/stringify.h> |
33 | #include <linux/limits.h> | ||
34 | #include <linux/uaccess.h> | ||
33 | #include <asm/bitsperlong.h> | 35 | #include <asm/bitsperlong.h> |
34 | 36 | ||
35 | #include "trace.h" | 37 | #include "trace.h" |
@@ -38,6 +40,7 @@ | |||
38 | #define MAX_TRACE_ARGS 128 | 40 | #define MAX_TRACE_ARGS 128 |
39 | #define MAX_ARGSTR_LEN 63 | 41 | #define MAX_ARGSTR_LEN 63 |
40 | #define MAX_EVENT_NAME_LEN 64 | 42 | #define MAX_EVENT_NAME_LEN 64 |
43 | #define MAX_STRING_SIZE PATH_MAX | ||
41 | #define KPROBE_EVENT_SYSTEM "kprobes" | 44 | #define KPROBE_EVENT_SYSTEM "kprobes" |
42 | 45 | ||
43 | /* Reserved field names */ | 46 | /* Reserved field names */ |
@@ -58,14 +61,16 @@ const char *reserved_field_names[] = { | |||
58 | }; | 61 | }; |
59 | 62 | ||
60 | /* Printing function type */ | 63 | /* Printing function type */ |
61 | typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *); | 64 | typedef int (*print_type_func_t)(struct trace_seq *, const char *, void *, |
65 | void *); | ||
62 | #define PRINT_TYPE_FUNC_NAME(type) print_type_##type | 66 | #define PRINT_TYPE_FUNC_NAME(type) print_type_##type |
63 | #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type | 67 | #define PRINT_TYPE_FMT_NAME(type) print_type_format_##type |
64 | 68 | ||
65 | /* Printing in basic type function template */ | 69 | /* Printing in basic type function template */ |
66 | #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ | 70 | #define DEFINE_BASIC_PRINT_TYPE_FUNC(type, fmt, cast) \ |
67 | static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ | 71 | static __kprobes int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, \ |
68 | const char *name, void *data)\ | 72 | const char *name, \ |
73 | void *data, void *ent)\ | ||
69 | { \ | 74 | { \ |
70 | return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ | 75 | return trace_seq_printf(s, " %s=" fmt, name, (cast)*(type *)data);\ |
71 | } \ | 76 | } \ |
@@ -80,6 +85,49 @@ DEFINE_BASIC_PRINT_TYPE_FUNC(s16, "%d", int) | |||
80 | DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) | 85 | DEFINE_BASIC_PRINT_TYPE_FUNC(s32, "%ld", long) |
81 | DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) | 86 | DEFINE_BASIC_PRINT_TYPE_FUNC(s64, "%lld", long long) |
82 | 87 | ||
88 | /* data_rloc: data relative location, compatible with u32 */ | ||
89 | #define make_data_rloc(len, roffs) \ | ||
90 | (((u32)(len) << 16) | ((u32)(roffs) & 0xffff)) | ||
91 | #define get_rloc_len(dl) ((u32)(dl) >> 16) | ||
92 | #define get_rloc_offs(dl) ((u32)(dl) & 0xffff) | ||
93 | |||
94 | static inline void *get_rloc_data(u32 *dl) | ||
95 | { | ||
96 | return (u8 *)dl + get_rloc_offs(*dl); | ||
97 | } | ||
98 | |||
99 | /* For data_loc conversion */ | ||
100 | static inline void *get_loc_data(u32 *dl, void *ent) | ||
101 | { | ||
102 | return (u8 *)ent + get_rloc_offs(*dl); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Convert data_rloc to data_loc: | ||
107 | * data_rloc stores the offset from data_rloc itself, but data_loc | ||
108 | * stores the offset from event entry. | ||
109 | */ | ||
110 | #define convert_rloc_to_loc(dl, offs) ((u32)(dl) + (offs)) | ||
111 | |||
112 | /* For defining macros, define string/string_size types */ | ||
113 | typedef u32 string; | ||
114 | typedef u32 string_size; | ||
115 | |||
116 | /* Print type function for string type */ | ||
117 | static __kprobes int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, | ||
118 | const char *name, | ||
119 | void *data, void *ent) | ||
120 | { | ||
121 | int len = *(u32 *)data >> 16; | ||
122 | |||
123 | if (!len) | ||
124 | return trace_seq_printf(s, " %s=(fault)", name); | ||
125 | else | ||
126 | return trace_seq_printf(s, " %s=\"%s\"", name, | ||
127 | (const char *)get_loc_data(data, ent)); | ||
128 | } | ||
129 | static const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; | ||
130 | |||
83 | /* Data fetch function type */ | 131 | /* Data fetch function type */ |
84 | typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); | 132 | typedef void (*fetch_func_t)(struct pt_regs *, void *, void *); |
85 | 133 | ||
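The data_rloc helpers above pack a length and a relative offset into one u32: length in the upper 16 bits, offset (measured from the u32 slot itself) in the lower 16. A stand-alone demonstration of the packing:

    #include <stdio.h>
    #include <stdint.h>

    #define make_data_rloc(len, roffs) \
            (((uint32_t)(len) << 16) | ((uint32_t)(roffs) & 0xffff))
    #define get_rloc_len(dl)  ((uint32_t)(dl) >> 16)
    #define get_rloc_offs(dl) ((uint32_t)(dl) & 0xffff)

    int main(void)
    {
            uint32_t dl = make_data_rloc(64, 24);   /* 64 bytes, 24 bytes past the slot */

            printf("len=%u offs=%u\n", get_rloc_len(dl), get_rloc_offs(dl));   /* len=64 offs=24 */
            return 0;
    }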
@@ -94,32 +142,38 @@ static __kprobes void call_fetch(struct fetch_param *fprm, | |||
94 | return fprm->fn(regs, fprm->data, dest); | 142 | return fprm->fn(regs, fprm->data, dest); |
95 | } | 143 | } |
96 | 144 | ||
97 | #define FETCH_FUNC_NAME(kind, type) fetch_##kind##_##type | 145 | #define FETCH_FUNC_NAME(method, type) fetch_##method##_##type |
98 | /* | 146 | /* |
99 | * Define macro for basic types - we don't need to define s* types, because | 147 | * Define macro for basic types - we don't need to define s* types, because |
100 | * we have to care only about bitwidth at recording time. | 148 | * we have to care only about bitwidth at recording time. |
101 | */ | 149 | */ |
102 | #define DEFINE_BASIC_FETCH_FUNCS(kind) \ | 150 | #define DEFINE_BASIC_FETCH_FUNCS(method) \ |
103 | DEFINE_FETCH_##kind(u8) \ | 151 | DEFINE_FETCH_##method(u8) \ |
104 | DEFINE_FETCH_##kind(u16) \ | 152 | DEFINE_FETCH_##method(u16) \ |
105 | DEFINE_FETCH_##kind(u32) \ | 153 | DEFINE_FETCH_##method(u32) \ |
106 | DEFINE_FETCH_##kind(u64) | 154 | DEFINE_FETCH_##method(u64) |
107 | 155 | ||
108 | #define CHECK_BASIC_FETCH_FUNCS(kind, fn) \ | 156 | #define CHECK_FETCH_FUNCS(method, fn) \ |
109 | ((FETCH_FUNC_NAME(kind, u8) == fn) || \ | 157 | (((FETCH_FUNC_NAME(method, u8) == fn) || \ |
110 | (FETCH_FUNC_NAME(kind, u16) == fn) || \ | 158 | (FETCH_FUNC_NAME(method, u16) == fn) || \ |
111 | (FETCH_FUNC_NAME(kind, u32) == fn) || \ | 159 | (FETCH_FUNC_NAME(method, u32) == fn) || \ |
112 | (FETCH_FUNC_NAME(kind, u64) == fn)) | 160 | (FETCH_FUNC_NAME(method, u64) == fn) || \ |
161 | (FETCH_FUNC_NAME(method, string) == fn) || \ | ||
162 | (FETCH_FUNC_NAME(method, string_size) == fn)) \ | ||
163 | && (fn != NULL)) | ||
113 | 164 | ||
114 | /* Data fetch function templates */ | 165 | /* Data fetch function templates */ |
115 | #define DEFINE_FETCH_reg(type) \ | 166 | #define DEFINE_FETCH_reg(type) \ |
116 | static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ | 167 | static __kprobes void FETCH_FUNC_NAME(reg, type)(struct pt_regs *regs, \ |
117 | void *offset, void *dest) \ | 168 | void *offset, void *dest) \ |
118 | { \ | 169 | { \ |
119 | *(type *)dest = (type)regs_get_register(regs, \ | 170 | *(type *)dest = (type)regs_get_register(regs, \ |
120 | (unsigned int)((unsigned long)offset)); \ | 171 | (unsigned int)((unsigned long)offset)); \ |
121 | } | 172 | } |
122 | DEFINE_BASIC_FETCH_FUNCS(reg) | 173 | DEFINE_BASIC_FETCH_FUNCS(reg) |
174 | /* No string on the register */ | ||
175 | #define fetch_reg_string NULL | ||
176 | #define fetch_reg_string_size NULL | ||
123 | 177 | ||
124 | #define DEFINE_FETCH_stack(type) \ | 178 | #define DEFINE_FETCH_stack(type) \ |
125 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ | 179 | static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ |
@@ -129,6 +183,9 @@ static __kprobes void FETCH_FUNC_NAME(stack, type)(struct pt_regs *regs,\ | |||
129 | (unsigned int)((unsigned long)offset)); \ | 183 | (unsigned int)((unsigned long)offset)); \ |
130 | } | 184 | } |
131 | DEFINE_BASIC_FETCH_FUNCS(stack) | 185 | DEFINE_BASIC_FETCH_FUNCS(stack) |
186 | /* No string on the stack entry */ | ||
187 | #define fetch_stack_string NULL | ||
188 | #define fetch_stack_string_size NULL | ||
132 | 189 | ||
133 | #define DEFINE_FETCH_retval(type) \ | 190 | #define DEFINE_FETCH_retval(type) \ |
134 | static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ | 191 | static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ |
@@ -137,6 +194,9 @@ static __kprobes void FETCH_FUNC_NAME(retval, type)(struct pt_regs *regs,\ | |||
137 | *(type *)dest = (type)regs_return_value(regs); \ | 194 | *(type *)dest = (type)regs_return_value(regs); \ |
138 | } | 195 | } |
139 | DEFINE_BASIC_FETCH_FUNCS(retval) | 196 | DEFINE_BASIC_FETCH_FUNCS(retval) |
197 | /* No string on the retval */ | ||
198 | #define fetch_retval_string NULL | ||
199 | #define fetch_retval_string_size NULL | ||
140 | 200 | ||
141 | #define DEFINE_FETCH_memory(type) \ | 201 | #define DEFINE_FETCH_memory(type) \ |
142 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ | 202 | static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ |
@@ -149,6 +209,62 @@ static __kprobes void FETCH_FUNC_NAME(memory, type)(struct pt_regs *regs,\ | |||
149 | *(type *)dest = retval; \ | 209 | *(type *)dest = retval; \ |
150 | } | 210 | } |
151 | DEFINE_BASIC_FETCH_FUNCS(memory) | 211 | DEFINE_BASIC_FETCH_FUNCS(memory) |
212 | /* | ||
213 | * Fetch a null-terminated string. Caller MUST set *(u32 *)dest with max | ||
214 | * length and relative data location. | ||
215 | */ | ||
216 | static __kprobes void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs, | ||
217 | void *addr, void *dest) | ||
218 | { | ||
219 | long ret; | ||
220 | int maxlen = get_rloc_len(*(u32 *)dest); | ||
221 | u8 *dst = get_rloc_data(dest); | ||
222 | u8 *src = addr; | ||
223 | mm_segment_t old_fs = get_fs(); | ||
224 | if (!maxlen) | ||
225 | return; | ||
226 | /* | ||
227 | * Try to get string again, since the string can be changed while | ||
228 | * probing. | ||
229 | */ | ||
230 | set_fs(KERNEL_DS); | ||
231 | pagefault_disable(); | ||
232 | do | ||
233 | ret = __copy_from_user_inatomic(dst++, src++, 1); | ||
234 | while (dst[-1] && ret == 0 && src - (u8 *)addr < maxlen); | ||
235 | dst[-1] = '\0'; | ||
236 | pagefault_enable(); | ||
237 | set_fs(old_fs); | ||
238 | |||
239 | if (ret < 0) { /* Failed to fetch string */ | ||
240 | ((u8 *)get_rloc_data(dest))[0] = '\0'; | ||
241 | *(u32 *)dest = make_data_rloc(0, get_rloc_offs(*(u32 *)dest)); | ||
242 | } else | ||
243 | *(u32 *)dest = make_data_rloc(src - (u8 *)addr, | ||
244 | get_rloc_offs(*(u32 *)dest)); | ||
245 | } | ||
246 | /* Return the length of the string, including the terminating null byte */ | ||
247 | static __kprobes void FETCH_FUNC_NAME(memory, string_size)(struct pt_regs *regs, | ||
248 | void *addr, void *dest) | ||
249 | { | ||
250 | int ret, len = 0; | ||
251 | u8 c; | ||
252 | mm_segment_t old_fs = get_fs(); | ||
253 | |||
254 | set_fs(KERNEL_DS); | ||
255 | pagefault_disable(); | ||
256 | do { | ||
257 | ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); | ||
258 | len++; | ||
259 | } while (c && ret == 0 && len < MAX_STRING_SIZE); | ||
260 | pagefault_enable(); | ||
261 | set_fs(old_fs); | ||
262 | |||
263 | if (ret < 0) /* Failed to check the length */ | ||
264 | *(u32 *)dest = 0; | ||
265 | else | ||
266 | *(u32 *)dest = len; | ||
267 | } | ||
152 | 268 | ||
153 | /* Memory fetching by symbol */ | 269 | /* Memory fetching by symbol */ |
154 | struct symbol_cache { | 270 | struct symbol_cache { |
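fetch_memory_string() copies the target string byte by byte under pagefault_disable(), stopping at a NUL, a fault or the recorded maximum length; it always forces a terminating NUL and writes the copied length (terminator included) back into the data_rloc word, or length 0 on failure. A user-space analogue of just the bounded-copy rule, with no kernel accessors involved:

    #include <stdio.h>

    static unsigned int fetch_string(char *dst, const char *src, unsigned int maxlen)
    {
            unsigned int n = 0;

            if (!maxlen)
                    return 0;
            do {
                    dst[n] = src[n];
                    n++;
            } while (dst[n - 1] && n < maxlen);
            dst[n - 1] = '\0';      /* force termination, as the kernel loop does */
            return n;               /* bytes stored, including the NUL */
    }

    int main(void)
    {
            char buf[8];
            unsigned int len = fetch_string(buf, "/etc/passwd", sizeof(buf));

            printf("stored \"%s\" (%u bytes incl. NUL)\n", buf, len);
            return 0;
    }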
@@ -203,6 +319,8 @@ static __kprobes void FETCH_FUNC_NAME(symbol, type)(struct pt_regs *regs,\ | |||
203 | *(type *)dest = 0; \ | 319 | *(type *)dest = 0; \ |
204 | } | 320 | } |
205 | DEFINE_BASIC_FETCH_FUNCS(symbol) | 321 | DEFINE_BASIC_FETCH_FUNCS(symbol) |
322 | DEFINE_FETCH_symbol(string) | ||
323 | DEFINE_FETCH_symbol(string_size) | ||
206 | 324 | ||
207 | /* Dereference memory access function */ | 325 | /* Dereference memory access function */ |
208 | struct deref_fetch_param { | 326 | struct deref_fetch_param { |
@@ -224,12 +342,14 @@ static __kprobes void FETCH_FUNC_NAME(deref, type)(struct pt_regs *regs,\ | |||
224 | *(type *)dest = 0; \ | 342 | *(type *)dest = 0; \ |
225 | } | 343 | } |
226 | DEFINE_BASIC_FETCH_FUNCS(deref) | 344 | DEFINE_BASIC_FETCH_FUNCS(deref) |
345 | DEFINE_FETCH_deref(string) | ||
346 | DEFINE_FETCH_deref(string_size) | ||
227 | 347 | ||
228 | static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) | 348 | static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) |
229 | { | 349 | { |
230 | if (CHECK_BASIC_FETCH_FUNCS(deref, data->orig.fn)) | 350 | if (CHECK_FETCH_FUNCS(deref, data->orig.fn)) |
231 | free_deref_fetch_param(data->orig.data); | 351 | free_deref_fetch_param(data->orig.data); |
232 | else if (CHECK_BASIC_FETCH_FUNCS(symbol, data->orig.fn)) | 352 | else if (CHECK_FETCH_FUNCS(symbol, data->orig.fn)) |
233 | free_symbol_cache(data->orig.data); | 353 | free_symbol_cache(data->orig.data); |
234 | kfree(data); | 354 | kfree(data); |
235 | } | 355 | } |
@@ -240,23 +360,43 @@ static __kprobes void free_deref_fetch_param(struct deref_fetch_param *data) | |||
240 | #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) | 360 | #define DEFAULT_FETCH_TYPE _DEFAULT_FETCH_TYPE(BITS_PER_LONG) |
241 | #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) | 361 | #define DEFAULT_FETCH_TYPE_STR __stringify(DEFAULT_FETCH_TYPE) |
242 | 362 | ||
243 | #define ASSIGN_FETCH_FUNC(kind, type) \ | 363 | /* Fetch types */ |
244 | .kind = FETCH_FUNC_NAME(kind, type) | 364 | enum { |
245 | 365 | FETCH_MTD_reg = 0, | |
246 | #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ | 366 | FETCH_MTD_stack, |
247 | {.name = #ptype, \ | 367 | FETCH_MTD_retval, |
248 | .size = sizeof(ftype), \ | 368 | FETCH_MTD_memory, |
249 | .is_signed = sign, \ | 369 | FETCH_MTD_symbol, |
250 | .print = PRINT_TYPE_FUNC_NAME(ptype), \ | 370 | FETCH_MTD_deref, |
251 | .fmt = PRINT_TYPE_FMT_NAME(ptype), \ | 371 | FETCH_MTD_END, |
252 | ASSIGN_FETCH_FUNC(reg, ftype), \ | 372 | }; |
253 | ASSIGN_FETCH_FUNC(stack, ftype), \ | 373 | |
254 | ASSIGN_FETCH_FUNC(retval, ftype), \ | 374 | #define ASSIGN_FETCH_FUNC(method, type) \ |
255 | ASSIGN_FETCH_FUNC(memory, ftype), \ | 375 | [FETCH_MTD_##method] = FETCH_FUNC_NAME(method, type) |
256 | ASSIGN_FETCH_FUNC(symbol, ftype), \ | 376 | |
257 | ASSIGN_FETCH_FUNC(deref, ftype), \ | 377 | #define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ |
378 | {.name = _name, \ | ||
379 | .size = _size, \ | ||
380 | .is_signed = sign, \ | ||
381 | .print = PRINT_TYPE_FUNC_NAME(ptype), \ | ||
382 | .fmt = PRINT_TYPE_FMT_NAME(ptype), \ | ||
383 | .fmttype = _fmttype, \ | ||
384 | .fetch = { \ | ||
385 | ASSIGN_FETCH_FUNC(reg, ftype), \ | ||
386 | ASSIGN_FETCH_FUNC(stack, ftype), \ | ||
387 | ASSIGN_FETCH_FUNC(retval, ftype), \ | ||
388 | ASSIGN_FETCH_FUNC(memory, ftype), \ | ||
389 | ASSIGN_FETCH_FUNC(symbol, ftype), \ | ||
390 | ASSIGN_FETCH_FUNC(deref, ftype), \ | ||
391 | } \ | ||
258 | } | 392 | } |
259 | 393 | ||
394 | #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ | ||
395 | __ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, #ptype) | ||
396 | |||
397 | #define FETCH_TYPE_STRING 0 | ||
398 | #define FETCH_TYPE_STRSIZE 1 | ||
399 | |||
260 | /* Fetch type information table */ | 400 | /* Fetch type information table */ |
261 | static const struct fetch_type { | 401 | static const struct fetch_type { |
262 | const char *name; /* Name of type */ | 402 | const char *name; /* Name of type */ |
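The restructuring above replaces six separately named fetch-function members with one array indexed by the FETCH_MTD_* method enum, so a type only provides real functions for the methods that make sense (the rest stay NULL, which CHECK_FETCH_FUNCS now rejects explicitly). A compilable sketch of the enum-indexed table; the names and bodies below are placeholders, not the kernel's.

    #include <stdio.h>

    typedef long (*fetch_func_t)(long arg);

    enum { FETCH_MTD_reg, FETCH_MTD_stack, FETCH_MTD_retval, FETCH_MTD_END };

    static long fetch_reg_u64(long arg)    { return arg + 1; }
    static long fetch_stack_u64(long arg)  { return arg + 2; }
    static long fetch_retval_u64(long arg) { return arg + 3; }

    struct fetch_type {
            const char *name;
            fetch_func_t fetch[FETCH_MTD_END];
    };

    static const struct fetch_type u64_type = {
            .name  = "u64",
            .fetch = {
                    [FETCH_MTD_reg]    = fetch_reg_u64,
                    [FETCH_MTD_stack]  = fetch_stack_u64,
                    [FETCH_MTD_retval] = fetch_retval_u64,
            },
    };

    int main(void)
    {
            /* The parser can now pick a method by index, e.g. "stack": */
            printf("%ld\n", u64_type.fetch[FETCH_MTD_stack](40));   /* 42 */
            return 0;
    }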
@@ -264,14 +404,16 @@ static const struct fetch_type { | |||
264 | int is_signed; /* Signed flag */ | 404 | int is_signed; /* Signed flag */ |
265 | print_type_func_t print; /* Print functions */ | 405 | print_type_func_t print; /* Print functions */ |
266 | const char *fmt; /* Format string */ | 406 | const char *fmt; /* Format string */ |
407 | const char *fmttype; /* Name in format file */ | ||
267 | /* Fetch functions */ | 408 | /* Fetch functions */ |
268 | fetch_func_t reg; | 409 | fetch_func_t fetch[FETCH_MTD_END]; |
269 | fetch_func_t stack; | ||
270 | fetch_func_t retval; | ||
271 | fetch_func_t memory; | ||
272 | fetch_func_t symbol; | ||
273 | fetch_func_t deref; | ||
274 | } fetch_type_table[] = { | 410 | } fetch_type_table[] = { |
411 | /* Special types */ | ||
412 | [FETCH_TYPE_STRING] = __ASSIGN_FETCH_TYPE("string", string, string, | ||
413 | sizeof(u32), 1, "__data_loc char[]"), | ||
414 | [FETCH_TYPE_STRSIZE] = __ASSIGN_FETCH_TYPE("string_size", u32, | ||
415 | string_size, sizeof(u32), 0, "u32"), | ||
416 | /* Basic types */ | ||
275 | ASSIGN_FETCH_TYPE(u8, u8, 0), | 417 | ASSIGN_FETCH_TYPE(u8, u8, 0), |
276 | ASSIGN_FETCH_TYPE(u16, u16, 0), | 418 | ASSIGN_FETCH_TYPE(u16, u16, 0), |
277 | ASSIGN_FETCH_TYPE(u32, u32, 0), | 419 | ASSIGN_FETCH_TYPE(u32, u32, 0), |
@@ -302,12 +444,28 @@ static __kprobes void fetch_stack_address(struct pt_regs *regs, | |||
302 | *(unsigned long *)dest = kernel_stack_pointer(regs); | 444 | *(unsigned long *)dest = kernel_stack_pointer(regs); |
303 | } | 445 | } |
304 | 446 | ||
447 | static fetch_func_t get_fetch_size_function(const struct fetch_type *type, | ||
448 | fetch_func_t orig_fn) | ||
449 | { | ||
450 | int i; | ||
451 | |||
452 | if (type != &fetch_type_table[FETCH_TYPE_STRING]) | ||
453 | return NULL; /* Only string type needs size function */ | ||
454 | for (i = 0; i < FETCH_MTD_END; i++) | ||
455 | if (type->fetch[i] == orig_fn) | ||
456 | return fetch_type_table[FETCH_TYPE_STRSIZE].fetch[i]; | ||
457 | |||
458 | WARN_ON(1); /* This should not happen */ | ||
459 | return NULL; | ||
460 | } | ||
461 | |||
305 | /** | 462 | /** |
306 | * Kprobe event core functions | 463 | * Kprobe event core functions |
307 | */ | 464 | */ |
308 | 465 | ||
309 | struct probe_arg { | 466 | struct probe_arg { |
310 | struct fetch_param fetch; | 467 | struct fetch_param fetch; |
468 | struct fetch_param fetch_size; | ||
311 | unsigned int offset; /* Offset from argument entry */ | 469 | unsigned int offset; /* Offset from argument entry */ |
312 | const char *name; /* Name of this argument */ | 470 | const char *name; /* Name of this argument */ |
313 | const char *comm; /* Command of this argument */ | 471 | const char *comm; /* Command of this argument */ |
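get_fetch_size_function() relies on that layout: for a string argument it looks up which method slot the chosen fetch function occupies and returns the string_size function from the same slot, which parse_probe_arg() stores in the new fetch_size member. A small user-space sketch of the slot-matching idea; all function names here are invented.

    #include <stdio.h>

    typedef void (*fetch_fn)(void);

    static void fetch_memory_string(void)      {}
    static void fetch_memory_string_size(void) {}
    static void fetch_deref_string(void)       {}
    static void fetch_deref_string_size(void)  {}

    enum { MTD_memory, MTD_deref, MTD_END };

    static fetch_fn string_fetch[MTD_END]      = { fetch_memory_string, fetch_deref_string };
    static fetch_fn string_size_fetch[MTD_END] = { fetch_memory_string_size, fetch_deref_string_size };

    static fetch_fn size_fn_for(fetch_fn orig)
    {
            for (int i = 0; i < MTD_END; i++)
                    if (string_fetch[i] == orig)
                            return string_size_fetch[i];
            return NULL;
    }

    int main(void)
    {
            printf("%s\n", size_fn_for(fetch_deref_string) == fetch_deref_string_size ?
                   "matched the deref slot" : "no match");
            return 0;
    }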
@@ -429,9 +587,9 @@ error: | |||
429 | 587 | ||
430 | static void free_probe_arg(struct probe_arg *arg) | 588 | static void free_probe_arg(struct probe_arg *arg) |
431 | { | 589 | { |
432 | if (CHECK_BASIC_FETCH_FUNCS(deref, arg->fetch.fn)) | 590 | if (CHECK_FETCH_FUNCS(deref, arg->fetch.fn)) |
433 | free_deref_fetch_param(arg->fetch.data); | 591 | free_deref_fetch_param(arg->fetch.data); |
434 | else if (CHECK_BASIC_FETCH_FUNCS(symbol, arg->fetch.fn)) | 592 | else if (CHECK_FETCH_FUNCS(symbol, arg->fetch.fn)) |
435 | free_symbol_cache(arg->fetch.data); | 593 | free_symbol_cache(arg->fetch.data); |
436 | kfree(arg->name); | 594 | kfree(arg->name); |
437 | kfree(arg->comm); | 595 | kfree(arg->comm); |
@@ -548,7 +706,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
548 | 706 | ||
549 | if (strcmp(arg, "retval") == 0) { | 707 | if (strcmp(arg, "retval") == 0) { |
550 | if (is_return) | 708 | if (is_return) |
551 | f->fn = t->retval; | 709 | f->fn = t->fetch[FETCH_MTD_retval]; |
552 | else | 710 | else |
553 | ret = -EINVAL; | 711 | ret = -EINVAL; |
554 | } else if (strncmp(arg, "stack", 5) == 0) { | 712 | } else if (strncmp(arg, "stack", 5) == 0) { |
@@ -562,7 +720,7 @@ static int parse_probe_vars(char *arg, const struct fetch_type *t, | |||
562 | if (ret || param > PARAM_MAX_STACK) | 720 | if (ret || param > PARAM_MAX_STACK) |
563 | ret = -EINVAL; | 721 | ret = -EINVAL; |
564 | else { | 722 | else { |
565 | f->fn = t->stack; | 723 | f->fn = t->fetch[FETCH_MTD_stack]; |
566 | f->data = (void *)param; | 724 | f->data = (void *)param; |
567 | } | 725 | } |
568 | } else | 726 | } else |
@@ -588,7 +746,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
588 | case '%': /* named register */ | 746 | case '%': /* named register */ |
589 | ret = regs_query_register_offset(arg + 1); | 747 | ret = regs_query_register_offset(arg + 1); |
590 | if (ret >= 0) { | 748 | if (ret >= 0) { |
591 | f->fn = t->reg; | 749 | f->fn = t->fetch[FETCH_MTD_reg]; |
592 | f->data = (void *)(unsigned long)ret; | 750 | f->data = (void *)(unsigned long)ret; |
593 | ret = 0; | 751 | ret = 0; |
594 | } | 752 | } |
@@ -598,7 +756,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
598 | ret = strict_strtoul(arg + 1, 0, ¶m); | 756 | ret = strict_strtoul(arg + 1, 0, ¶m); |
599 | if (ret) | 757 | if (ret) |
600 | break; | 758 | break; |
601 | f->fn = t->memory; | 759 | f->fn = t->fetch[FETCH_MTD_memory]; |
602 | f->data = (void *)param; | 760 | f->data = (void *)param; |
603 | } else { | 761 | } else { |
604 | ret = split_symbol_offset(arg + 1, &offset); | 762 | ret = split_symbol_offset(arg + 1, &offset); |
@@ -606,7 +764,7 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
606 | break; | 764 | break; |
607 | f->data = alloc_symbol_cache(arg + 1, offset); | 765 | f->data = alloc_symbol_cache(arg + 1, offset); |
608 | if (f->data) | 766 | if (f->data) |
609 | f->fn = t->symbol; | 767 | f->fn = t->fetch[FETCH_MTD_symbol]; |
610 | } | 768 | } |
611 | break; | 769 | break; |
612 | case '+': /* deref memory */ | 770 | case '+': /* deref memory */ |
@@ -636,14 +794,17 @@ static int __parse_probe_arg(char *arg, const struct fetch_type *t, | |||
636 | if (ret) | 794 | if (ret) |
637 | kfree(dprm); | 795 | kfree(dprm); |
638 | else { | 796 | else { |
639 | f->fn = t->deref; | 797 | f->fn = t->fetch[FETCH_MTD_deref]; |
640 | f->data = (void *)dprm; | 798 | f->data = (void *)dprm; |
641 | } | 799 | } |
642 | } | 800 | } |
643 | break; | 801 | break; |
644 | } | 802 | } |
645 | if (!ret && !f->fn) | 803 | if (!ret && !f->fn) { /* Parsed, but no fetch method found */ |
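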
804 | pr_info("%s type has no corresponding fetch method.\n", | ||
805 | t->name); | ||
646 | ret = -EINVAL; | 806 | ret = -EINVAL; |
807 | } | ||
647 | return ret; | 808 | return ret; |
648 | } | 809 | } |
649 | 810 | ||
@@ -652,6 +813,7 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp, | |||
652 | struct probe_arg *parg, int is_return) | 813 | struct probe_arg *parg, int is_return) |
653 | { | 814 | { |
654 | const char *t; | 815 | const char *t; |
816 | int ret; | ||
655 | 817 | ||
656 | if (strlen(arg) > MAX_ARGSTR_LEN) { | 818 | if (strlen(arg) > MAX_ARGSTR_LEN) { |
657 | pr_info("Argument is too long.: %s\n", arg); | 819 | pr_info("Argument is too long.: %s\n", arg); |
@@ -674,7 +836,13 @@ static int parse_probe_arg(char *arg, struct trace_probe *tp, | |||
674 | } | 836 | } |
675 | parg->offset = tp->size; | 837 | parg->offset = tp->size; |
676 | tp->size += parg->type->size; | 838 | tp->size += parg->type->size; |
677 | return __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); | 839 | ret = __parse_probe_arg(arg, parg->type, &parg->fetch, is_return); |
840 | if (ret >= 0) { | ||
841 | parg->fetch_size.fn = get_fetch_size_function(parg->type, | ||
842 | parg->fetch.fn); | ||
843 | parg->fetch_size.data = parg->fetch.data; | ||
844 | } | ||
845 | return ret; | ||
678 | } | 846 | } |
679 | 847 | ||
680 | /* Return 1 if name is reserved or already used by another argument */ | 848 | /* Return 1 if name is reserved or already used by another argument */ |
@@ -757,14 +925,17 @@ static int create_trace_probe(int argc, char **argv) | |||
757 | pr_info("Delete command needs an event name.\n"); | 925 | pr_info("Delete command needs an event name.\n"); |
758 | return -EINVAL; | 926 | return -EINVAL; |
759 | } | 927 | } |
928 | mutex_lock(&probe_lock); | ||
760 | tp = find_probe_event(event, group); | 929 | tp = find_probe_event(event, group); |
761 | if (!tp) { | 930 | if (!tp) { |
931 | mutex_unlock(&probe_lock); | ||
762 | pr_info("Event %s/%s doesn't exist.\n", group, event); | 932 | pr_info("Event %s/%s doesn't exist.\n", group, event); |
763 | return -ENOENT; | 933 | return -ENOENT; |
764 | } | 934 | } |
765 | /* delete an event */ | 935 | /* delete an event */ |
766 | unregister_trace_probe(tp); | 936 | unregister_trace_probe(tp); |
767 | free_trace_probe(tp); | 937 | free_trace_probe(tp); |
938 | mutex_unlock(&probe_lock); | ||
768 | return 0; | 939 | return 0; |
769 | } | 940 | } |
770 | 941 | ||
@@ -1043,6 +1214,54 @@ static const struct file_operations kprobe_profile_ops = { | |||
1043 | .release = seq_release, | 1214 | .release = seq_release, |
1044 | }; | 1215 | }; |
1045 | 1216 | ||
1217 | /* Sum up total data length for dynamic arrays (strings) */ | ||
1218 | static __kprobes int __get_data_size(struct trace_probe *tp, | ||
1219 | struct pt_regs *regs) | ||
1220 | { | ||
1221 | int i, ret = 0; | ||
1222 | u32 len; | ||
1223 | |||
1224 | for (i = 0; i < tp->nr_args; i++) | ||
1225 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
1226 | call_fetch(&tp->args[i].fetch_size, regs, &len); | ||
1227 | ret += len; | ||
1228 | } | ||
1229 | |||
1230 | return ret; | ||
1231 | } | ||
1232 | |||
1233 | /* Store the value of each argument */ | ||
1234 | static __kprobes void store_trace_args(int ent_size, struct trace_probe *tp, | ||
1235 | struct pt_regs *regs, | ||
1236 | u8 *data, int maxlen) | ||
1237 | { | ||
1238 | int i; | ||
1239 | u32 end = tp->size; | ||
1240 | u32 *dl; /* Data (relative) location */ | ||
1241 | |||
1242 | for (i = 0; i < tp->nr_args; i++) { | ||
1243 | if (unlikely(tp->args[i].fetch_size.fn)) { | ||
1244 | /* | ||
1245 | * First, we set the relative location and | ||
1246 | * maximum data length to *dl | ||
1247 | */ | ||
1248 | dl = (u32 *)(data + tp->args[i].offset); | ||
1249 | *dl = make_data_rloc(maxlen, end - tp->args[i].offset); | ||
1250 | /* Then try to fetch string or dynamic array data */ | ||
1251 | call_fetch(&tp->args[i].fetch, regs, dl); | ||
1252 | /* Reduce maximum length */ | ||
1253 | end += get_rloc_len(*dl); | ||
1254 | maxlen -= get_rloc_len(*dl); | ||
1255 | /* Trick here, convert data_rloc to data_loc */ | ||
1256 | *dl = convert_rloc_to_loc(*dl, | ||
1257 | ent_size + tp->args[i].offset); | ||
1258 | } else | ||
1259 | /* Just fetching data normally */ | ||
1260 | call_fetch(&tp->args[i].fetch, regs, | ||
1261 | data + tp->args[i].offset); | ||
1262 | } | ||
1263 | } | ||
1264 | |||
1046 | /* Kprobe handler */ | 1265 | /* Kprobe handler */ |
1047 | static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | 1266 | static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) |
1048 | { | 1267 | { |
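__get_data_size() and store_trace_args() lay a record out as a fixed argument area of tp->size bytes followed by a dynamic area for string data; a string argument's u32 slot ends up holding a data_loc value, i.e. (length << 16) | offset-from-entry. The stand-alone sketch below builds such a record and reads the string back through the slot alone; the header size and offsets are arbitrary.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define make_data_loc(len, offs) (((uint32_t)(len) << 16) | ((uint32_t)(offs) & 0xffff))
    #define data_loc_len(dl)  ((uint32_t)(dl) >> 16)
    #define data_loc_offs(dl) ((uint32_t)(dl) & 0xffff)

    int main(void)
    {
            uint8_t record[64] = { 0 };
            const uint32_t hdr_size  = 8;              /* hypothetical entry header            */
            const uint32_t slot_offs = hdr_size;       /* the string argument's fixed slot     */
            const uint32_t data_offs = hdr_size + 4;   /* dynamic data starts after the slots  */
            const char *path = "/etc/passwd";
            uint32_t len = (uint32_t)strlen(path) + 1;
            uint32_t dl, slot;

            memcpy(record + data_offs, path, len);
            dl = make_data_loc(len, data_offs);
            memcpy(record + slot_offs, &dl, sizeof(dl));

            /* A consumer needs only the slot to find the string: */
            memcpy(&slot, record + slot_offs, sizeof(slot));
            printf("len=%u str=\"%s\"\n", data_loc_len(slot),
                   (char *)(record + data_loc_offs(slot)));
            return 0;
    }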
@@ -1050,8 +1269,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
1050 | struct kprobe_trace_entry_head *entry; | 1269 | struct kprobe_trace_entry_head *entry; |
1051 | struct ring_buffer_event *event; | 1270 | struct ring_buffer_event *event; |
1052 | struct ring_buffer *buffer; | 1271 | struct ring_buffer *buffer; |
1053 | u8 *data; | 1272 | int size, dsize, pc; |
1054 | int size, i, pc; | ||
1055 | unsigned long irq_flags; | 1273 | unsigned long irq_flags; |
1056 | struct ftrace_event_call *call = &tp->call; | 1274 | struct ftrace_event_call *call = &tp->call; |
1057 | 1275 | ||
@@ -1060,7 +1278,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
1060 | local_save_flags(irq_flags); | 1278 | local_save_flags(irq_flags); |
1061 | pc = preempt_count(); | 1279 | pc = preempt_count(); |
1062 | 1280 | ||
1063 | size = sizeof(*entry) + tp->size; | 1281 | dsize = __get_data_size(tp, regs); |
1282 | size = sizeof(*entry) + tp->size + dsize; | ||
1064 | 1283 | ||
1065 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 1284 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, |
1066 | size, irq_flags, pc); | 1285 | size, irq_flags, pc); |
@@ -1069,9 +1288,7 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | |||
1069 | 1288 | ||
1070 | entry = ring_buffer_event_data(event); | 1289 | entry = ring_buffer_event_data(event); |
1071 | entry->ip = (unsigned long)kp->addr; | 1290 | entry->ip = (unsigned long)kp->addr; |
1072 | data = (u8 *)&entry[1]; | 1291 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1073 | for (i = 0; i < tp->nr_args; i++) | ||
1074 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1075 | 1292 | ||
1076 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1293 | if (!filter_current_check_discard(buffer, call, entry, event)) |
1077 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1294 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
@@ -1085,15 +1302,15 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
1085 | struct kretprobe_trace_entry_head *entry; | 1302 | struct kretprobe_trace_entry_head *entry; |
1086 | struct ring_buffer_event *event; | 1303 | struct ring_buffer_event *event; |
1087 | struct ring_buffer *buffer; | 1304 | struct ring_buffer *buffer; |
1088 | u8 *data; | 1305 | int size, pc, dsize; |
1089 | int size, i, pc; | ||
1090 | unsigned long irq_flags; | 1306 | unsigned long irq_flags; |
1091 | struct ftrace_event_call *call = &tp->call; | 1307 | struct ftrace_event_call *call = &tp->call; |
1092 | 1308 | ||
1093 | local_save_flags(irq_flags); | 1309 | local_save_flags(irq_flags); |
1094 | pc = preempt_count(); | 1310 | pc = preempt_count(); |
1095 | 1311 | ||
1096 | size = sizeof(*entry) + tp->size; | 1312 | dsize = __get_data_size(tp, regs); |
1313 | size = sizeof(*entry) + tp->size + dsize; | ||
1097 | 1314 | ||
1098 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 1315 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, |
1099 | size, irq_flags, pc); | 1316 | size, irq_flags, pc); |
@@ -1103,9 +1320,7 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, | |||
1103 | entry = ring_buffer_event_data(event); | 1320 | entry = ring_buffer_event_data(event); |
1104 | entry->func = (unsigned long)tp->rp.kp.addr; | 1321 | entry->func = (unsigned long)tp->rp.kp.addr; |
1105 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1322 | entry->ret_ip = (unsigned long)ri->ret_addr; |
1106 | data = (u8 *)&entry[1]; | 1323 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1107 | for (i = 0; i < tp->nr_args; i++) | ||
1108 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1109 | 1324 | ||
1110 | if (!filter_current_check_discard(buffer, call, entry, event)) | 1325 | if (!filter_current_check_discard(buffer, call, entry, event)) |
1111 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); | 1326 | trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc); |
@@ -1137,7 +1352,7 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
1137 | data = (u8 *)&field[1]; | 1352 | data = (u8 *)&field[1]; |
1138 | for (i = 0; i < tp->nr_args; i++) | 1353 | for (i = 0; i < tp->nr_args; i++) |
1139 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1354 | if (!tp->args[i].type->print(s, tp->args[i].name, |
1140 | data + tp->args[i].offset)) | 1355 | data + tp->args[i].offset, field)) |
1141 | goto partial; | 1356 | goto partial; |
1142 | 1357 | ||
1143 | if (!trace_seq_puts(s, "\n")) | 1358 | if (!trace_seq_puts(s, "\n")) |
@@ -1179,7 +1394,7 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
1179 | data = (u8 *)&field[1]; | 1394 | data = (u8 *)&field[1]; |
1180 | for (i = 0; i < tp->nr_args; i++) | 1395 | for (i = 0; i < tp->nr_args; i++) |
1181 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1396 | if (!tp->args[i].type->print(s, tp->args[i].name, |
1182 | data + tp->args[i].offset)) | 1397 | data + tp->args[i].offset, field)) |
1183 | goto partial; | 1398 | goto partial; |
1184 | 1399 | ||
1185 | if (!trace_seq_puts(s, "\n")) | 1400 | if (!trace_seq_puts(s, "\n")) |
@@ -1214,11 +1429,6 @@ static void probe_event_disable(struct ftrace_event_call *call) | |||
1214 | } | 1429 | } |
1215 | } | 1430 | } |
1216 | 1431 | ||
1217 | static int probe_event_raw_init(struct ftrace_event_call *event_call) | ||
1218 | { | ||
1219 | return 0; | ||
1220 | } | ||
1221 | |||
1222 | #undef DEFINE_FIELD | 1432 | #undef DEFINE_FIELD |
1223 | #define DEFINE_FIELD(type, item, name, is_signed) \ | 1433 | #define DEFINE_FIELD(type, item, name, is_signed) \ |
1224 | do { \ | 1434 | do { \ |
@@ -1239,7 +1449,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1239 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 1449 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
1240 | /* Set argument names as fields */ | 1450 | /* Set argument names as fields */ |
1241 | for (i = 0; i < tp->nr_args; i++) { | 1451 | for (i = 0; i < tp->nr_args; i++) { |
1242 | ret = trace_define_field(event_call, tp->args[i].type->name, | 1452 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, |
1243 | tp->args[i].name, | 1453 | tp->args[i].name, |
1244 | sizeof(field) + tp->args[i].offset, | 1454 | sizeof(field) + tp->args[i].offset, |
1245 | tp->args[i].type->size, | 1455 | tp->args[i].type->size, |
@@ -1261,7 +1471,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1261 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); | 1471 | DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0); |
1262 | /* Set argument names as fields */ | 1472 | /* Set argument names as fields */ |
1263 | for (i = 0; i < tp->nr_args; i++) { | 1473 | for (i = 0; i < tp->nr_args; i++) { |
1264 | ret = trace_define_field(event_call, tp->args[i].type->name, | 1474 | ret = trace_define_field(event_call, tp->args[i].type->fmttype, |
1265 | tp->args[i].name, | 1475 | tp->args[i].name, |
1266 | sizeof(field) + tp->args[i].offset, | 1476 | sizeof(field) + tp->args[i].offset, |
1267 | tp->args[i].type->size, | 1477 | tp->args[i].type->size, |
@@ -1301,8 +1511,13 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len) | |||
1301 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); | 1511 | pos += snprintf(buf + pos, LEN_OR_ZERO, "\", %s", arg); |
1302 | 1512 | ||
1303 | for (i = 0; i < tp->nr_args; i++) { | 1513 | for (i = 0; i < tp->nr_args; i++) { |
1304 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | 1514 | if (strcmp(tp->args[i].type->name, "string") == 0) |
1305 | tp->args[i].name); | 1515 | pos += snprintf(buf + pos, LEN_OR_ZERO, |
1516 | ", __get_str(%s)", | ||
1517 | tp->args[i].name); | ||
1518 | else | ||
1519 | pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", | ||
1520 | tp->args[i].name); | ||
1306 | } | 1521 | } |
1307 | 1522 | ||
1308 | #undef LEN_OR_ZERO | 1523 | #undef LEN_OR_ZERO |
@@ -1339,11 +1554,11 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, | |||
1339 | struct ftrace_event_call *call = &tp->call; | 1554 | struct ftrace_event_call *call = &tp->call; |
1340 | struct kprobe_trace_entry_head *entry; | 1555 | struct kprobe_trace_entry_head *entry; |
1341 | struct hlist_head *head; | 1556 | struct hlist_head *head; |
1342 | u8 *data; | 1557 | int size, __size, dsize; |
1343 | int size, __size, i; | ||
1344 | int rctx; | 1558 | int rctx; |
1345 | 1559 | ||
1346 | __size = sizeof(*entry) + tp->size; | 1560 | dsize = __get_data_size(tp, regs); |
1561 | __size = sizeof(*entry) + tp->size + dsize; | ||
1347 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1562 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1348 | size -= sizeof(u32); | 1563 | size -= sizeof(u32); |
1349 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | 1564 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
@@ -1355,9 +1570,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, | |||
1355 | return; | 1570 | return; |
1356 | 1571 | ||
1357 | entry->ip = (unsigned long)kp->addr; | 1572 | entry->ip = (unsigned long)kp->addr; |
1358 | data = (u8 *)&entry[1]; | 1573 | memset(&entry[1], 0, dsize); |
1359 | for (i = 0; i < tp->nr_args; i++) | 1574 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1360 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1361 | 1575 | ||
1362 | head = this_cpu_ptr(call->perf_events); | 1576 | head = this_cpu_ptr(call->perf_events); |
1363 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); | 1577 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); |
@@ -1371,11 +1585,11 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, | |||
1371 | struct ftrace_event_call *call = &tp->call; | 1585 | struct ftrace_event_call *call = &tp->call; |
1372 | struct kretprobe_trace_entry_head *entry; | 1586 | struct kretprobe_trace_entry_head *entry; |
1373 | struct hlist_head *head; | 1587 | struct hlist_head *head; |
1374 | u8 *data; | 1588 | int size, __size, dsize; |
1375 | int size, __size, i; | ||
1376 | int rctx; | 1589 | int rctx; |
1377 | 1590 | ||
1378 | __size = sizeof(*entry) + tp->size; | 1591 | dsize = __get_data_size(tp, regs); |
1592 | __size = sizeof(*entry) + tp->size + dsize; | ||
1379 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | 1593 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); |
1380 | size -= sizeof(u32); | 1594 | size -= sizeof(u32); |
1381 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, | 1595 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, |
@@ -1388,9 +1602,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, | |||
1388 | 1602 | ||
1389 | entry->func = (unsigned long)tp->rp.kp.addr; | 1603 | entry->func = (unsigned long)tp->rp.kp.addr; |
1390 | entry->ret_ip = (unsigned long)ri->ret_addr; | 1604 | entry->ret_ip = (unsigned long)ri->ret_addr; |
1391 | data = (u8 *)&entry[1]; | 1605 | store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); |
1392 | for (i = 0; i < tp->nr_args; i++) | ||
1393 | call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset); | ||
1394 | 1606 | ||
1395 | head = this_cpu_ptr(call->perf_events); | 1607 | head = this_cpu_ptr(call->perf_events); |
1396 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); | 1608 | perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); |
@@ -1486,15 +1698,12 @@ static int register_probe_event(struct trace_probe *tp) | |||
1486 | int ret; | 1698 | int ret; |
1487 | 1699 | ||
1488 | /* Initialize ftrace_event_call */ | 1700 | /* Initialize ftrace_event_call */ |
1701 | INIT_LIST_HEAD(&call->class->fields); | ||
1489 | if (probe_is_return(tp)) { | 1702 | if (probe_is_return(tp)) { |
1490 | INIT_LIST_HEAD(&call->class->fields); | ||
1491 | call->event.funcs = &kretprobe_funcs; | 1703 | call->event.funcs = &kretprobe_funcs; |
1492 | call->class->raw_init = probe_event_raw_init; | ||
1493 | call->class->define_fields = kretprobe_event_define_fields; | 1704 | call->class->define_fields = kretprobe_event_define_fields; |
1494 | } else { | 1705 | } else { |
1495 | INIT_LIST_HEAD(&call->class->fields); | ||
1496 | call->event.funcs = &kprobe_funcs; | 1706 | call->event.funcs = &kprobe_funcs; |
1497 | call->class->raw_init = probe_event_raw_init; | ||
1498 | call->class->define_fields = kprobe_event_define_fields; | 1707 | call->class->define_fields = kprobe_event_define_fields; |
1499 | } | 1708 | } |
1500 | if (set_print_fmt(tp) < 0) | 1709 | if (set_print_fmt(tp) < 0) |
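With the string fetch type wired up, a kprobe event argument can be declared through the usual NAME=FETCHARG:TYPE syntax of /sys/kernel/debug/tracing/kprobe_events. The sketch below is purely illustrative: the probed symbol, the register that carries the filename pointer and the debugfs mount point depend on the kernel version and architecture, and writing the file requires root.

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/kernel/debug/tracing/kprobe_events", "w");

            if (!f) {
                    perror("kprobe_events");
                    return 1;
            }
            /* +0(%si) dereferences the pointer in %si; :string selects the new type */
            fprintf(f, "p:myprobe do_sys_open dfd=%%di path=+0(%%si):string\n");
            fclose(f);
            return 0;
    }

After this, the generated events/kprobes/myprobe/format file describes path as a __data_loc char[] field, matching the fmttype set in the table above.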
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c deleted file mode 100644 index 8eaf00749b65..000000000000 --- a/kernel/trace/trace_ksym.c +++ /dev/null | |||
@@ -1,508 +0,0 @@ | |||
1 | /* | ||
2 | * trace_ksym.c - Kernel Symbol Tracer | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2009 | ||
19 | */ | ||
20 | |||
21 | #include <linux/kallsyms.h> | ||
22 | #include <linux/uaccess.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/ftrace.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/fs.h> | ||
28 | |||
29 | #include "trace_output.h" | ||
30 | #include "trace.h" | ||
31 | |||
32 | #include <linux/hw_breakpoint.h> | ||
33 | #include <asm/hw_breakpoint.h> | ||
34 | |||
35 | #include <asm/atomic.h> | ||
36 | |||
37 | #define KSYM_TRACER_OP_LEN 3 /* rw- */ | ||
38 | |||
39 | struct trace_ksym { | ||
40 | struct perf_event **ksym_hbp; | ||
41 | struct perf_event_attr attr; | ||
42 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
43 | atomic64_t counter; | ||
44 | #endif | ||
45 | struct hlist_node ksym_hlist; | ||
46 | }; | ||
47 | |||
48 | static struct trace_array *ksym_trace_array; | ||
49 | |||
50 | static unsigned int ksym_tracing_enabled; | ||
51 | |||
52 | static HLIST_HEAD(ksym_filter_head); | ||
53 | |||
54 | static DEFINE_MUTEX(ksym_tracer_mutex); | ||
55 | |||
56 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
57 | |||
58 | #define MAX_UL_INT 0xffffffff | ||
59 | |||
60 | void ksym_collect_stats(unsigned long hbp_hit_addr) | ||
61 | { | ||
62 | struct hlist_node *node; | ||
63 | struct trace_ksym *entry; | ||
64 | |||
65 | rcu_read_lock(); | ||
66 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | ||
67 | if (entry->attr.bp_addr == hbp_hit_addr) { | ||
68 | atomic64_inc(&entry->counter); | ||
69 | break; | ||
70 | } | ||
71 | } | ||
72 | rcu_read_unlock(); | ||
73 | } | ||
74 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
75 | |||
76 | void ksym_hbp_handler(struct perf_event *hbp, int nmi, | ||
77 | struct perf_sample_data *data, | ||
78 | struct pt_regs *regs) | ||
79 | { | ||
80 | struct ring_buffer_event *event; | ||
81 | struct ksym_trace_entry *entry; | ||
82 | struct ring_buffer *buffer; | ||
83 | int pc; | ||
84 | |||
85 | if (!ksym_tracing_enabled) | ||
86 | return; | ||
87 | |||
88 | buffer = ksym_trace_array->buffer; | ||
89 | |||
90 | pc = preempt_count(); | ||
91 | |||
92 | event = trace_buffer_lock_reserve(buffer, TRACE_KSYM, | ||
93 | sizeof(*entry), 0, pc); | ||
94 | if (!event) | ||
95 | return; | ||
96 | |||
97 | entry = ring_buffer_event_data(event); | ||
98 | entry->ip = instruction_pointer(regs); | ||
99 | entry->type = hw_breakpoint_type(hbp); | ||
100 | entry->addr = hw_breakpoint_addr(hbp); | ||
101 | strlcpy(entry->cmd, current->comm, TASK_COMM_LEN); | ||
102 | |||
103 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
104 | ksym_collect_stats(hw_breakpoint_addr(hbp)); | ||
105 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
106 | |||
107 | trace_buffer_unlock_commit(buffer, event, 0, pc); | ||
108 | } | ||
109 | |||
110 | /* Valid access types are represented as | ||
111 | * | ||
112 | * rw- : Set Read/Write Access Breakpoint | ||
113 | * -w- : Set Write Access Breakpoint | ||
114 | * --- : Clear Breakpoints | ||
115 | * --x : Set Execution Breakpoints (not available yet) | ||
116 | * | ||
117 | */ | ||
118 | static int ksym_trace_get_access_type(char *str) | ||
119 | { | ||
120 | int access = 0; | ||
121 | |||
122 | if (str[0] == 'r') | ||
123 | access |= HW_BREAKPOINT_R; | ||
124 | |||
125 | if (str[1] == 'w') | ||
126 | access |= HW_BREAKPOINT_W; | ||
127 | |||
128 | if (str[2] == 'x') | ||
129 | access |= HW_BREAKPOINT_X; | ||
130 | |||
131 | switch (access) { | ||
132 | case HW_BREAKPOINT_R: | ||
133 | case HW_BREAKPOINT_W: | ||
134 | case HW_BREAKPOINT_W | HW_BREAKPOINT_R: | ||
135 | return access; | ||
136 | default: | ||
137 | return -EINVAL; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * There can be several possible malformed requests and we attempt to capture | ||
143 | * all of them. We enumerate some of the rules | ||
144 | * 1. We will not allow kernel symbols with ':' since it is used as a delimiter. | ||
145 | * i.e. multiple ':' symbols disallowed. Possible uses are of the form | ||
146 | * <module>:<ksym_name>:<op>. | ||
147 | * 2. No delimiter symbol ':' in the input string | ||
148 | * 3. Spurious operator symbols or symbols not in their respective positions | ||
149 | * 4. <ksym_name>:--- i.e. clear breakpoint request when ksym_name not in file | ||
150 | * 5. Kernel symbol not a part of /proc/kallsyms | ||
151 | * 6. Duplicate requests | ||
152 | */ | ||
153 | static int parse_ksym_trace_str(char *input_string, char **ksymname, | ||
154 | unsigned long *addr) | ||
155 | { | ||
156 | int ret; | ||
157 | |||
158 | *ksymname = strsep(&input_string, ":"); | ||
159 | *addr = kallsyms_lookup_name(*ksymname); | ||
160 | |||
161 | /* Check for malformed request: (2), (1) and (5) */ | ||
162 | if ((!input_string) || | ||
163 | (strlen(input_string) != KSYM_TRACER_OP_LEN) || | ||
164 | (*addr == 0)) | ||
165 | 		return -EINVAL; | ||
166 | |||
167 | ret = ksym_trace_get_access_type(input_string); | ||
168 | |||
169 | return ret; | ||
170 | } | ||
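parse_ksym_trace_str() splits the filter string at the first ':' with strsep(), resolves the symbol through kallsyms and then validates the three-character operator. A user-space sketch of just the string handling (the symbol lookup is stubbed out with a made-up fake_lookup(), since kallsyms is kernel-only):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define OP_LEN 3	/* "rw-" style operator, as in KSYM_TRACER_OP_LEN */

/* stand-in for kallsyms_lookup_name(); always "finds" the symbol here */
static unsigned long fake_lookup(const char *name)
{
	return (name && name[0]) ? 0xc0123456UL : 0;
}

static int parse_filter(char *input, char **symname, unsigned long *addr)
{
	*symname = strsep(&input, ":");
	*addr = fake_lookup(*symname);

	/* no ':' found, wrong operator length, or unknown symbol */
	if (!input || strlen(input) != OP_LEN || *addr == 0)
		return -EINVAL;

	printf("symbol '%s' at %#lx, op '%s'\n", *symname, *addr, input);
	return 0;
}

int main(void)
{
	char buf[] = "pid_max:rw-";
	char *sym;
	unsigned long addr;

	return parse_filter(buf, &sym, &addr) ? 1 : 0;
}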
171 | |||
172 | int process_new_ksym_entry(char *ksymname, int op, unsigned long addr) | ||
173 | { | ||
174 | struct trace_ksym *entry; | ||
175 | int ret = -ENOMEM; | ||
176 | |||
177 | entry = kzalloc(sizeof(struct trace_ksym), GFP_KERNEL); | ||
178 | if (!entry) | ||
179 | return -ENOMEM; | ||
180 | |||
181 | hw_breakpoint_init(&entry->attr); | ||
182 | |||
183 | entry->attr.bp_type = op; | ||
184 | entry->attr.bp_addr = addr; | ||
185 | entry->attr.bp_len = HW_BREAKPOINT_LEN_4; | ||
186 | |||
187 | entry->ksym_hbp = register_wide_hw_breakpoint(&entry->attr, | ||
188 | ksym_hbp_handler); | ||
189 | |||
190 | if (IS_ERR(entry->ksym_hbp)) { | ||
191 | ret = PTR_ERR(entry->ksym_hbp); | ||
192 | if (ret == -ENOSPC) { | ||
193 | printk(KERN_ERR "ksym_tracer: Maximum limit reached." | ||
194 | " No new requests for tracing can be accepted now.\n"); | ||
195 | } else { | ||
196 | printk(KERN_INFO "ksym_tracer request failed. Try again" | ||
197 | " later!!\n"); | ||
198 | } | ||
199 | goto err; | ||
200 | } | ||
201 | |||
202 | hlist_add_head_rcu(&(entry->ksym_hlist), &ksym_filter_head); | ||
203 | |||
204 | return 0; | ||
205 | |||
206 | err: | ||
207 | kfree(entry); | ||
208 | |||
209 | return ret; | ||
210 | } | ||
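process_new_ksym_entry() is the only place the ksym tracer arms a breakpoint: it fills a perf_event_attr and hands it to register_wide_hw_breakpoint(), which creates one event per CPU. A condensed in-kernel sketch of that call sequence, not a drop-in replacement: error handling is trimmed and my_handler stands in for a callback like ksym_hbp_handler() above.

/* sketch only: assumes <linux/hw_breakpoint.h> and a my_handler() callback */
static struct perf_event **arm_watchpoint(unsigned long addr, int type)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);		/* sane defaults */
	attr.bp_addr = addr;			/* kernel address to watch */
	attr.bp_type = type;			/* HW_BREAKPOINT_R/W combination */
	attr.bp_len  = HW_BREAKPOINT_LEN_4;	/* watch a 4-byte window */

	/* one event per online CPU; returns an ERR_PTR() value on failure */
	return register_wide_hw_breakpoint(&attr, my_handler);
}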
211 | |||
212 | static ssize_t ksym_trace_filter_read(struct file *filp, char __user *ubuf, | ||
213 | size_t count, loff_t *ppos) | ||
214 | { | ||
215 | struct trace_ksym *entry; | ||
216 | struct hlist_node *node; | ||
217 | struct trace_seq *s; | ||
218 | ssize_t cnt = 0; | ||
219 | int ret; | ||
220 | |||
221 | s = kmalloc(sizeof(*s), GFP_KERNEL); | ||
222 | if (!s) | ||
223 | return -ENOMEM; | ||
224 | trace_seq_init(s); | ||
225 | |||
226 | mutex_lock(&ksym_tracer_mutex); | ||
227 | |||
228 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
229 | ret = trace_seq_printf(s, "%pS:", | ||
230 | (void *)(unsigned long)entry->attr.bp_addr); | ||
231 | if (entry->attr.bp_type == HW_BREAKPOINT_R) | ||
232 | ret = trace_seq_puts(s, "r--\n"); | ||
233 | else if (entry->attr.bp_type == HW_BREAKPOINT_W) | ||
234 | ret = trace_seq_puts(s, "-w-\n"); | ||
235 | else if (entry->attr.bp_type == (HW_BREAKPOINT_W | HW_BREAKPOINT_R)) | ||
236 | ret = trace_seq_puts(s, "rw-\n"); | ||
237 | WARN_ON_ONCE(!ret); | ||
238 | } | ||
239 | |||
240 | cnt = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | ||
241 | |||
242 | mutex_unlock(&ksym_tracer_mutex); | ||
243 | |||
244 | kfree(s); | ||
245 | |||
246 | return cnt; | ||
247 | } | ||
248 | |||
249 | static void __ksym_trace_reset(void) | ||
250 | { | ||
251 | struct trace_ksym *entry; | ||
252 | struct hlist_node *node, *node1; | ||
253 | |||
254 | mutex_lock(&ksym_tracer_mutex); | ||
255 | hlist_for_each_entry_safe(entry, node, node1, &ksym_filter_head, | ||
256 | ksym_hlist) { | ||
257 | unregister_wide_hw_breakpoint(entry->ksym_hbp); | ||
258 | hlist_del_rcu(&(entry->ksym_hlist)); | ||
259 | synchronize_rcu(); | ||
260 | kfree(entry); | ||
261 | } | ||
262 | mutex_unlock(&ksym_tracer_mutex); | ||
263 | } | ||
264 | |||
265 | static ssize_t ksym_trace_filter_write(struct file *file, | ||
266 | const char __user *buffer, | ||
267 | size_t count, loff_t *ppos) | ||
268 | { | ||
269 | struct trace_ksym *entry; | ||
270 | struct hlist_node *node; | ||
271 | char *buf, *input_string, *ksymname = NULL; | ||
272 | unsigned long ksym_addr = 0; | ||
273 | int ret, op, changed = 0; | ||
274 | |||
275 | buf = kzalloc(count + 1, GFP_KERNEL); | ||
276 | if (!buf) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | ret = -EFAULT; | ||
280 | if (copy_from_user(buf, buffer, count)) | ||
281 | goto out; | ||
282 | |||
283 | buf[count] = '\0'; | ||
284 | input_string = strstrip(buf); | ||
285 | |||
286 | /* | ||
287 | * Clear all breakpoints if: | ||
288 | * 1: echo > ksym_trace_filter | ||
289 | * 2: echo 0 > ksym_trace_filter | ||
290 | * 3: echo "*:---" > ksym_trace_filter | ||
291 | */ | ||
292 | if (!input_string[0] || !strcmp(input_string, "0") || | ||
293 | !strcmp(input_string, "*:---")) { | ||
294 | __ksym_trace_reset(); | ||
295 | ret = 0; | ||
296 | goto out; | ||
297 | } | ||
298 | |||
299 | ret = op = parse_ksym_trace_str(input_string, &ksymname, &ksym_addr); | ||
300 | if (ret < 0) | ||
301 | goto out; | ||
302 | |||
303 | mutex_lock(&ksym_tracer_mutex); | ||
304 | |||
305 | ret = -EINVAL; | ||
306 | hlist_for_each_entry(entry, node, &ksym_filter_head, ksym_hlist) { | ||
307 | if (entry->attr.bp_addr == ksym_addr) { | ||
308 | /* Check for malformed request: (6) */ | ||
309 | if (entry->attr.bp_type != op) | ||
310 | changed = 1; | ||
311 | else | ||
312 | goto out_unlock; | ||
313 | break; | ||
314 | } | ||
315 | } | ||
316 | if (changed) { | ||
317 | unregister_wide_hw_breakpoint(entry->ksym_hbp); | ||
318 | entry->attr.bp_type = op; | ||
319 | ret = 0; | ||
320 | if (op > 0) { | ||
321 | entry->ksym_hbp = | ||
322 | register_wide_hw_breakpoint(&entry->attr, | ||
323 | ksym_hbp_handler); | ||
324 | if (IS_ERR(entry->ksym_hbp)) | ||
325 | ret = PTR_ERR(entry->ksym_hbp); | ||
326 | else | ||
327 | goto out_unlock; | ||
328 | } | ||
329 | /* Error or "symbol:---" case: drop it */ | ||
330 | hlist_del_rcu(&(entry->ksym_hlist)); | ||
331 | synchronize_rcu(); | ||
332 | kfree(entry); | ||
333 | goto out_unlock; | ||
334 | } else { | ||
335 | /* Check for malformed request: (4) */ | ||
336 | if (op) | ||
337 | ret = process_new_ksym_entry(ksymname, op, ksym_addr); | ||
338 | } | ||
339 | out_unlock: | ||
340 | mutex_unlock(&ksym_tracer_mutex); | ||
341 | out: | ||
342 | kfree(buf); | ||
343 | return !ret ? count : ret; | ||
344 | } | ||
345 | |||
346 | static const struct file_operations ksym_tracing_fops = { | ||
347 | .open = tracing_open_generic, | ||
348 | .read = ksym_trace_filter_read, | ||
349 | .write = ksym_trace_filter_write, | ||
350 | }; | ||
351 | |||
352 | static void ksym_trace_reset(struct trace_array *tr) | ||
353 | { | ||
354 | ksym_tracing_enabled = 0; | ||
355 | __ksym_trace_reset(); | ||
356 | } | ||
357 | |||
358 | static int ksym_trace_init(struct trace_array *tr) | ||
359 | { | ||
360 | int cpu, ret = 0; | ||
361 | |||
362 | for_each_online_cpu(cpu) | ||
363 | tracing_reset(tr, cpu); | ||
364 | ksym_tracing_enabled = 1; | ||
365 | ksym_trace_array = tr; | ||
366 | |||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | static void ksym_trace_print_header(struct seq_file *m) | ||
371 | { | ||
372 | seq_puts(m, | ||
373 | "# TASK-PID CPU# Symbol " | ||
374 | "Type Function\n"); | ||
375 | seq_puts(m, | ||
376 | "# | | | " | ||
377 | " | |\n"); | ||
378 | } | ||
379 | |||
380 | static enum print_line_t ksym_trace_output(struct trace_iterator *iter) | ||
381 | { | ||
382 | struct trace_entry *entry = iter->ent; | ||
383 | struct trace_seq *s = &iter->seq; | ||
384 | struct ksym_trace_entry *field; | ||
385 | char str[KSYM_SYMBOL_LEN]; | ||
386 | int ret; | ||
387 | |||
388 | if (entry->type != TRACE_KSYM) | ||
389 | return TRACE_TYPE_UNHANDLED; | ||
390 | |||
391 | trace_assign_type(field, entry); | ||
392 | |||
393 | ret = trace_seq_printf(s, "%11s-%-5d [%03d] %pS", field->cmd, | ||
394 | entry->pid, iter->cpu, (char *)field->addr); | ||
395 | if (!ret) | ||
396 | return TRACE_TYPE_PARTIAL_LINE; | ||
397 | |||
398 | switch (field->type) { | ||
399 | case HW_BREAKPOINT_R: | ||
400 | ret = trace_seq_printf(s, " R "); | ||
401 | break; | ||
402 | case HW_BREAKPOINT_W: | ||
403 | ret = trace_seq_printf(s, " W "); | ||
404 | break; | ||
405 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
406 | ret = trace_seq_printf(s, " RW "); | ||
407 | break; | ||
408 | default: | ||
409 | return TRACE_TYPE_PARTIAL_LINE; | ||
410 | } | ||
411 | |||
412 | if (!ret) | ||
413 | return TRACE_TYPE_PARTIAL_LINE; | ||
414 | |||
415 | sprint_symbol(str, field->ip); | ||
416 | ret = trace_seq_printf(s, "%s\n", str); | ||
417 | if (!ret) | ||
418 | return TRACE_TYPE_PARTIAL_LINE; | ||
419 | |||
420 | return TRACE_TYPE_HANDLED; | ||
421 | } | ||
422 | |||
423 | struct tracer ksym_tracer __read_mostly = | ||
424 | { | ||
425 | .name = "ksym_tracer", | ||
426 | .init = ksym_trace_init, | ||
427 | .reset = ksym_trace_reset, | ||
428 | #ifdef CONFIG_FTRACE_SELFTEST | ||
429 | .selftest = trace_selftest_startup_ksym, | ||
430 | #endif | ||
431 | .print_header = ksym_trace_print_header, | ||
432 | .print_line = ksym_trace_output | ||
433 | }; | ||
434 | |||
435 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
436 | static int ksym_profile_show(struct seq_file *m, void *v) | ||
437 | { | ||
438 | struct hlist_node *node; | ||
439 | struct trace_ksym *entry; | ||
440 | int access_type = 0; | ||
441 | char fn_name[KSYM_NAME_LEN]; | ||
442 | |||
443 | seq_puts(m, " Access Type "); | ||
444 | seq_puts(m, " Symbol Counter\n"); | ||
445 | seq_puts(m, " ----------- "); | ||
446 | seq_puts(m, " ------ -------\n"); | ||
447 | |||
448 | rcu_read_lock(); | ||
449 | hlist_for_each_entry_rcu(entry, node, &ksym_filter_head, ksym_hlist) { | ||
450 | |||
451 | access_type = entry->attr.bp_type; | ||
452 | |||
453 | switch (access_type) { | ||
454 | case HW_BREAKPOINT_R: | ||
455 | seq_puts(m, " R "); | ||
456 | break; | ||
457 | case HW_BREAKPOINT_W: | ||
458 | seq_puts(m, " W "); | ||
459 | break; | ||
460 | case HW_BREAKPOINT_R | HW_BREAKPOINT_W: | ||
461 | seq_puts(m, " RW "); | ||
462 | break; | ||
463 | default: | ||
464 | seq_puts(m, " NA "); | ||
465 | } | ||
466 | |||
467 | if (lookup_symbol_name(entry->attr.bp_addr, fn_name) >= 0) | ||
468 | seq_printf(m, " %-36s", fn_name); | ||
469 | else | ||
470 | seq_printf(m, " %-36s", "<NA>"); | ||
471 | seq_printf(m, " %15llu\n", | ||
472 | (unsigned long long)atomic64_read(&entry->counter)); | ||
473 | } | ||
474 | rcu_read_unlock(); | ||
475 | |||
476 | return 0; | ||
477 | } | ||
478 | |||
479 | static int ksym_profile_open(struct inode *node, struct file *file) | ||
480 | { | ||
481 | return single_open(file, ksym_profile_show, NULL); | ||
482 | } | ||
483 | |||
484 | static const struct file_operations ksym_profile_fops = { | ||
485 | .open = ksym_profile_open, | ||
486 | .read = seq_read, | ||
487 | .llseek = seq_lseek, | ||
488 | .release = single_release, | ||
489 | }; | ||
490 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | ||
491 | |||
492 | __init static int init_ksym_trace(void) | ||
493 | { | ||
494 | struct dentry *d_tracer; | ||
495 | |||
496 | d_tracer = tracing_init_dentry(); | ||
497 | |||
498 | trace_create_file("ksym_trace_filter", 0644, d_tracer, | ||
499 | NULL, &ksym_tracing_fops); | ||
500 | |||
501 | #ifdef CONFIG_PROFILE_KSYM_TRACER | ||
502 | trace_create_file("ksym_profile", 0444, d_tracer, | ||
503 | NULL, &ksym_profile_fops); | ||
504 | #endif | ||
505 | |||
506 | return register_tracer(&ksym_tracer); | ||
507 | } | ||
508 | device_initcall(init_ksym_trace); | ||
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 57c1b4596470..02272baa2206 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -16,9 +16,6 @@ | |||
16 | 16 | ||
17 | DECLARE_RWSEM(trace_event_mutex); | 17 | DECLARE_RWSEM(trace_event_mutex); |
18 | 18 | ||
19 | DEFINE_PER_CPU(struct trace_seq, ftrace_event_seq); | ||
20 | EXPORT_PER_CPU_SYMBOL(ftrace_event_seq); | ||
21 | |||
22 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | 19 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; |
23 | 20 | ||
24 | static int next_event_type = __TRACE_LAST_TYPE + 1; | 21 | static int next_event_type = __TRACE_LAST_TYPE + 1; |
@@ -1069,65 +1066,6 @@ static struct trace_event trace_wake_event = { | |||
1069 | .funcs = &trace_wake_funcs, | 1066 | .funcs = &trace_wake_funcs, |
1070 | }; | 1067 | }; |
1071 | 1068 | ||
1072 | /* TRACE_SPECIAL */ | ||
1073 | static enum print_line_t trace_special_print(struct trace_iterator *iter, | ||
1074 | int flags, struct trace_event *event) | ||
1075 | { | ||
1076 | struct special_entry *field; | ||
1077 | |||
1078 | trace_assign_type(field, iter->ent); | ||
1079 | |||
1080 | if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n", | ||
1081 | field->arg1, | ||
1082 | field->arg2, | ||
1083 | field->arg3)) | ||
1084 | return TRACE_TYPE_PARTIAL_LINE; | ||
1085 | |||
1086 | return TRACE_TYPE_HANDLED; | ||
1087 | } | ||
1088 | |||
1089 | static enum print_line_t trace_special_hex(struct trace_iterator *iter, | ||
1090 | int flags, struct trace_event *event) | ||
1091 | { | ||
1092 | struct special_entry *field; | ||
1093 | struct trace_seq *s = &iter->seq; | ||
1094 | |||
1095 | trace_assign_type(field, iter->ent); | ||
1096 | |||
1097 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | ||
1098 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | ||
1099 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | ||
1100 | |||
1101 | return TRACE_TYPE_HANDLED; | ||
1102 | } | ||
1103 | |||
1104 | static enum print_line_t trace_special_bin(struct trace_iterator *iter, | ||
1105 | int flags, struct trace_event *event) | ||
1106 | { | ||
1107 | struct special_entry *field; | ||
1108 | struct trace_seq *s = &iter->seq; | ||
1109 | |||
1110 | trace_assign_type(field, iter->ent); | ||
1111 | |||
1112 | SEQ_PUT_FIELD_RET(s, field->arg1); | ||
1113 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
1114 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
1115 | |||
1116 | return TRACE_TYPE_HANDLED; | ||
1117 | } | ||
1118 | |||
1119 | static struct trace_event_functions trace_special_funcs = { | ||
1120 | .trace = trace_special_print, | ||
1121 | .raw = trace_special_print, | ||
1122 | .hex = trace_special_hex, | ||
1123 | .binary = trace_special_bin, | ||
1124 | }; | ||
1125 | |||
1126 | static struct trace_event trace_special_event = { | ||
1127 | .type = TRACE_SPECIAL, | ||
1128 | .funcs = &trace_special_funcs, | ||
1129 | }; | ||
1130 | |||
1131 | /* TRACE_STACK */ | 1069 | /* TRACE_STACK */ |
1132 | 1070 | ||
1133 | static enum print_line_t trace_stack_print(struct trace_iterator *iter, | 1071 | static enum print_line_t trace_stack_print(struct trace_iterator *iter, |
@@ -1161,9 +1099,6 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
1161 | 1099 | ||
1162 | static struct trace_event_functions trace_stack_funcs = { | 1100 | static struct trace_event_functions trace_stack_funcs = { |
1163 | .trace = trace_stack_print, | 1101 | .trace = trace_stack_print, |
1164 | .raw = trace_special_print, | ||
1165 | .hex = trace_special_hex, | ||
1166 | .binary = trace_special_bin, | ||
1167 | }; | 1102 | }; |
1168 | 1103 | ||
1169 | static struct trace_event trace_stack_event = { | 1104 | static struct trace_event trace_stack_event = { |
@@ -1194,9 +1129,6 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, | |||
1194 | 1129 | ||
1195 | static struct trace_event_functions trace_user_stack_funcs = { | 1130 | static struct trace_event_functions trace_user_stack_funcs = { |
1196 | .trace = trace_user_stack_print, | 1131 | .trace = trace_user_stack_print, |
1197 | .raw = trace_special_print, | ||
1198 | .hex = trace_special_hex, | ||
1199 | .binary = trace_special_bin, | ||
1200 | }; | 1132 | }; |
1201 | 1133 | ||
1202 | static struct trace_event trace_user_stack_event = { | 1134 | static struct trace_event trace_user_stack_event = { |
@@ -1314,7 +1246,6 @@ static struct trace_event *events[] __initdata = { | |||
1314 | &trace_fn_event, | 1246 | &trace_fn_event, |
1315 | &trace_ctx_event, | 1247 | &trace_ctx_event, |
1316 | &trace_wake_event, | 1248 | &trace_wake_event, |
1317 | &trace_special_event, | ||
1318 | &trace_stack_event, | 1249 | &trace_stack_event, |
1319 | &trace_user_stack_event, | 1250 | &trace_user_stack_event, |
1320 | &trace_bprint_event, | 1251 | &trace_bprint_event, |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 0e73bc2ef8c5..4086eae6e81b 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -46,7 +46,6 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
46 | struct trace_array_cpu *data; | 46 | struct trace_array_cpu *data; |
47 | unsigned long flags; | 47 | unsigned long flags; |
48 | long disabled; | 48 | long disabled; |
49 | int resched; | ||
50 | int cpu; | 49 | int cpu; |
51 | int pc; | 50 | int pc; |
52 | 51 | ||
@@ -54,7 +53,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
54 | return; | 53 | return; |
55 | 54 | ||
56 | pc = preempt_count(); | 55 | pc = preempt_count(); |
57 | resched = ftrace_preempt_disable(); | 56 | preempt_disable_notrace(); |
58 | 57 | ||
59 | cpu = raw_smp_processor_id(); | 58 | cpu = raw_smp_processor_id(); |
60 | if (cpu != wakeup_current_cpu) | 59 | if (cpu != wakeup_current_cpu) |
@@ -74,7 +73,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
74 | out: | 73 | out: |
75 | atomic_dec(&data->disabled); | 74 | atomic_dec(&data->disabled); |
76 | out_enable: | 75 | out_enable: |
77 | ftrace_preempt_enable(resched); | 76 | preempt_enable_notrace(); |
78 | } | 77 | } |
79 | 78 | ||
80 | static struct ftrace_ops trace_ops __read_mostly = | 79 | static struct ftrace_ops trace_ops __read_mostly = |
@@ -383,6 +382,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
383 | #ifdef CONFIG_FTRACE_SELFTEST | 382 | #ifdef CONFIG_FTRACE_SELFTEST |
384 | .selftest = trace_selftest_startup_wakeup, | 383 | .selftest = trace_selftest_startup_wakeup, |
385 | #endif | 384 | #endif |
385 | .use_max_tr = 1, | ||
386 | }; | 386 | }; |
387 | 387 | ||
388 | static struct tracer wakeup_rt_tracer __read_mostly = | 388 | static struct tracer wakeup_rt_tracer __read_mostly = |
@@ -397,6 +397,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
397 | #ifdef CONFIG_FTRACE_SELFTEST | 397 | #ifdef CONFIG_FTRACE_SELFTEST |
398 | .selftest = trace_selftest_startup_wakeup, | 398 | .selftest = trace_selftest_startup_wakeup, |
399 | #endif | 399 | #endif |
400 | .use_max_tr = 1, | ||
400 | }; | 401 | }; |
401 | 402 | ||
402 | __init static int init_wakeup_tracer(void) | 403 | __init static int init_wakeup_tracer(void) |
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 250e7f9bd2f0..155a415b3209 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -13,11 +13,9 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
13 | case TRACE_WAKE: | 13 | case TRACE_WAKE: |
14 | case TRACE_STACK: | 14 | case TRACE_STACK: |
15 | case TRACE_PRINT: | 15 | case TRACE_PRINT: |
16 | case TRACE_SPECIAL: | ||
17 | case TRACE_BRANCH: | 16 | case TRACE_BRANCH: |
18 | case TRACE_GRAPH_ENT: | 17 | case TRACE_GRAPH_ENT: |
19 | case TRACE_GRAPH_RET: | 18 | case TRACE_GRAPH_RET: |
20 | case TRACE_KSYM: | ||
21 | return 1; | 19 | return 1; |
22 | } | 20 | } |
23 | return 0; | 21 | return 0; |
@@ -691,38 +689,6 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr | |||
691 | } | 689 | } |
692 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 690 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
693 | 691 | ||
694 | #ifdef CONFIG_SYSPROF_TRACER | ||
695 | int | ||
696 | trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | ||
697 | { | ||
698 | unsigned long count; | ||
699 | int ret; | ||
700 | |||
701 | /* start the tracing */ | ||
702 | ret = tracer_init(trace, tr); | ||
703 | if (ret) { | ||
704 | warn_failed_init_tracer(trace, ret); | ||
705 | return ret; | ||
706 | } | ||
707 | |||
708 | /* Sleep for a 1/10 of a second */ | ||
709 | msleep(100); | ||
710 | /* stop the tracing. */ | ||
711 | tracing_stop(); | ||
712 | /* check the trace buffer */ | ||
713 | ret = trace_test_buffer(tr, &count); | ||
714 | trace->reset(tr); | ||
715 | tracing_start(); | ||
716 | |||
717 | if (!ret && !count) { | ||
718 | printk(KERN_CONT ".. no entries found .."); | ||
719 | ret = -1; | ||
720 | } | ||
721 | |||
722 | return ret; | ||
723 | } | ||
724 | #endif /* CONFIG_SYSPROF_TRACER */ | ||
725 | |||
726 | #ifdef CONFIG_BRANCH_TRACER | 692 | #ifdef CONFIG_BRANCH_TRACER |
727 | int | 693 | int |
728 | trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | 694 | trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) |
@@ -755,56 +721,3 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | |||
755 | } | 721 | } |
756 | #endif /* CONFIG_BRANCH_TRACER */ | 722 | #endif /* CONFIG_BRANCH_TRACER */ |
757 | 723 | ||
758 | #ifdef CONFIG_KSYM_TRACER | ||
759 | static int ksym_selftest_dummy; | ||
760 | |||
761 | int | ||
762 | trace_selftest_startup_ksym(struct tracer *trace, struct trace_array *tr) | ||
763 | { | ||
764 | unsigned long count; | ||
765 | int ret; | ||
766 | |||
767 | /* start the tracing */ | ||
768 | ret = tracer_init(trace, tr); | ||
769 | if (ret) { | ||
770 | warn_failed_init_tracer(trace, ret); | ||
771 | return ret; | ||
772 | } | ||
773 | |||
774 | ksym_selftest_dummy = 0; | ||
775 | /* Register the read-write tracing request */ | ||
776 | |||
777 | ret = process_new_ksym_entry("ksym_selftest_dummy", | ||
778 | HW_BREAKPOINT_R | HW_BREAKPOINT_W, | ||
779 | (unsigned long)(&ksym_selftest_dummy)); | ||
780 | |||
781 | if (ret < 0) { | ||
782 | printk(KERN_CONT "ksym_trace read-write startup test failed\n"); | ||
783 | goto ret_path; | ||
784 | } | ||
785 | /* Perform a read and a write operation over the dummy variable to | ||
786 | * trigger the tracer | ||
787 | */ | ||
788 | if (ksym_selftest_dummy == 0) | ||
789 | ksym_selftest_dummy++; | ||
790 | |||
791 | /* stop the tracing. */ | ||
792 | tracing_stop(); | ||
793 | /* check the trace buffer */ | ||
794 | ret = trace_test_buffer(tr, &count); | ||
795 | trace->reset(tr); | ||
796 | tracing_start(); | ||
797 | |||
798 | /* read & write operations - one each is performed on the dummy variable | ||
799 | * triggering two entries in the trace buffer | ||
800 | */ | ||
801 | if (!ret && count != 2) { | ||
802 | printk(KERN_CONT "Ksym tracer startup test failed"); | ||
803 | ret = -1; | ||
804 | } | ||
805 | |||
806 | ret_path: | ||
807 | return ret; | ||
808 | } | ||
809 | #endif /* CONFIG_KSYM_TRACER */ | ||
810 | |||
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index f4bc9b27de5f..056468eae7cf 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
@@ -110,12 +110,12 @@ static inline void check_stack(void) | |||
110 | static void | 110 | static void |
111 | stack_trace_call(unsigned long ip, unsigned long parent_ip) | 111 | stack_trace_call(unsigned long ip, unsigned long parent_ip) |
112 | { | 112 | { |
113 | int cpu, resched; | 113 | int cpu; |
114 | 114 | ||
115 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) | 115 | if (unlikely(!ftrace_enabled || stack_trace_disabled)) |
116 | return; | 116 | return; |
117 | 117 | ||
118 | resched = ftrace_preempt_disable(); | 118 | preempt_disable_notrace(); |
119 | 119 | ||
120 | cpu = raw_smp_processor_id(); | 120 | cpu = raw_smp_processor_id(); |
121 | /* no atomic needed, we only modify this variable by this cpu */ | 121 | /* no atomic needed, we only modify this variable by this cpu */ |
@@ -127,7 +127,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) | |||
127 | out: | 127 | out: |
128 | per_cpu(trace_active, cpu)--; | 128 | per_cpu(trace_active, cpu)--; |
129 | /* prevent recursion in schedule */ | 129 | /* prevent recursion in schedule */ |
130 | ftrace_preempt_enable(resched); | 130 | preempt_enable_notrace(); |
131 | } | 131 | } |
132 | 132 | ||
133 | static struct ftrace_ops trace_ops __read_mostly = | 133 | static struct ftrace_ops trace_ops __read_mostly = |
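Both converted callbacks (wakeup_tracer_call() and stack_trace_call()) follow the same shape once the old ftrace_preempt_disable()/ftrace_preempt_enable() helpers are gone: disable preemption with the _notrace variants so the disable itself is not traced, guard against recursion with a per-CPU counter, and re-enable on the way out. A condensed kernel-style sketch of that pattern; my_trace_active, my_trace_call and the body are placeholders, not code from this patch.

/* sketch of the callback pattern used by stack_trace_call() above */
static DEFINE_PER_CPU(int, my_trace_active);

static void my_trace_call(unsigned long ip, unsigned long parent_ip)
{
	int cpu;

	preempt_disable_notrace();		/* stay on this CPU, untraced */

	cpu = raw_smp_processor_id();
	per_cpu(my_trace_active, cpu)++;
	if (per_cpu(my_trace_active, cpu) != 1)	/* already inside: bail out */
		goto out;

	/* ... do the real per-CPU work here ... */

out:
	per_cpu(my_trace_active, cpu)--;
	preempt_enable_notrace();
}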
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 34e35804304b..bac752f0cfb5 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -23,6 +23,9 @@ static int syscall_exit_register(struct ftrace_event_call *event, | |||
23 | static int syscall_enter_define_fields(struct ftrace_event_call *call); | 23 | static int syscall_enter_define_fields(struct ftrace_event_call *call); |
24 | static int syscall_exit_define_fields(struct ftrace_event_call *call); | 24 | static int syscall_exit_define_fields(struct ftrace_event_call *call); |
25 | 25 | ||
26 | /* All syscall exit events have the same fields */ | ||
27 | static LIST_HEAD(syscall_exit_fields); | ||
28 | |||
26 | static struct list_head * | 29 | static struct list_head * |
27 | syscall_get_enter_fields(struct ftrace_event_call *call) | 30 | syscall_get_enter_fields(struct ftrace_event_call *call) |
28 | { | 31 | { |
@@ -34,9 +37,7 @@ syscall_get_enter_fields(struct ftrace_event_call *call) | |||
34 | static struct list_head * | 37 | static struct list_head * |
35 | syscall_get_exit_fields(struct ftrace_event_call *call) | 38 | syscall_get_exit_fields(struct ftrace_event_call *call) |
36 | { | 39 | { |
37 | struct syscall_metadata *entry = call->data; | 40 | return &syscall_exit_fields; |
38 | |||
39 | return &entry->exit_fields; | ||
40 | } | 41 | } |
41 | 42 | ||
42 | struct trace_event_functions enter_syscall_print_funcs = { | 43 | struct trace_event_functions enter_syscall_print_funcs = { |
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c deleted file mode 100644 index a7974a552ca9..000000000000 --- a/kernel/trace/trace_sysprof.c +++ /dev/null | |||
@@ -1,329 +0,0 @@ | |||
1 | /* | ||
2 | * trace stack traces | ||
3 | * | ||
4 | * Copyright (C) 2004-2008, Soeren Sandmann | ||
5 | * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com> | ||
6 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | ||
7 | */ | ||
8 | #include <linux/kallsyms.h> | ||
9 | #include <linux/debugfs.h> | ||
10 | #include <linux/hrtimer.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/ftrace.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/fs.h> | ||
16 | |||
17 | #include <asm/stacktrace.h> | ||
18 | |||
19 | #include "trace.h" | ||
20 | |||
21 | static struct trace_array *sysprof_trace; | ||
22 | static int __read_mostly tracer_enabled; | ||
23 | |||
24 | /* | ||
25 | * 1 msec sample interval by default: | ||
26 | */ | ||
27 | static unsigned long sample_period = 1000000; | ||
28 | static const unsigned int sample_max_depth = 512; | ||
29 | |||
30 | static DEFINE_MUTEX(sample_timer_lock); | ||
31 | /* | ||
32 | * Per CPU hrtimers that do the profiling: | ||
33 | */ | ||
34 | static DEFINE_PER_CPU(struct hrtimer, stack_trace_hrtimer); | ||
35 | |||
36 | struct stack_frame { | ||
37 | const void __user *next_fp; | ||
38 | unsigned long return_address; | ||
39 | }; | ||
40 | |||
41 | static int copy_stack_frame(const void __user *fp, struct stack_frame *frame) | ||
42 | { | ||
43 | int ret; | ||
44 | |||
45 | if (!access_ok(VERIFY_READ, fp, sizeof(*frame))) | ||
46 | return 0; | ||
47 | |||
48 | ret = 1; | ||
49 | pagefault_disable(); | ||
50 | if (__copy_from_user_inatomic(frame, fp, sizeof(*frame))) | ||
51 | ret = 0; | ||
52 | pagefault_enable(); | ||
53 | |||
54 | return ret; | ||
55 | } | ||
56 | |||
57 | struct backtrace_info { | ||
58 | struct trace_array_cpu *data; | ||
59 | struct trace_array *tr; | ||
60 | int pos; | ||
61 | }; | ||
62 | |||
63 | static void | ||
64 | backtrace_warning_symbol(void *data, char *msg, unsigned long symbol) | ||
65 | { | ||
66 | /* Ignore warnings */ | ||
67 | } | ||
68 | |||
69 | static void backtrace_warning(void *data, char *msg) | ||
70 | { | ||
71 | /* Ignore warnings */ | ||
72 | } | ||
73 | |||
74 | static int backtrace_stack(void *data, char *name) | ||
75 | { | ||
76 | /* Don't bother with IRQ stacks for now */ | ||
77 | return -1; | ||
78 | } | ||
79 | |||
80 | static void backtrace_address(void *data, unsigned long addr, int reliable) | ||
81 | { | ||
82 | struct backtrace_info *info = data; | ||
83 | |||
84 | if (info->pos < sample_max_depth && reliable) { | ||
85 | __trace_special(info->tr, info->data, 1, addr, 0); | ||
86 | |||
87 | info->pos++; | ||
88 | } | ||
89 | } | ||
90 | |||
91 | static const struct stacktrace_ops backtrace_ops = { | ||
92 | .warning = backtrace_warning, | ||
93 | .warning_symbol = backtrace_warning_symbol, | ||
94 | .stack = backtrace_stack, | ||
95 | .address = backtrace_address, | ||
96 | .walk_stack = print_context_stack, | ||
97 | }; | ||
98 | |||
99 | static int | ||
100 | trace_kernel(struct pt_regs *regs, struct trace_array *tr, | ||
101 | struct trace_array_cpu *data) | ||
102 | { | ||
103 | struct backtrace_info info; | ||
104 | unsigned long bp; | ||
105 | char *stack; | ||
106 | |||
107 | info.tr = tr; | ||
108 | info.data = data; | ||
109 | info.pos = 1; | ||
110 | |||
111 | __trace_special(info.tr, info.data, 1, regs->ip, 0); | ||
112 | |||
113 | stack = ((char *)regs + sizeof(struct pt_regs)); | ||
114 | #ifdef CONFIG_FRAME_POINTER | ||
115 | bp = regs->bp; | ||
116 | #else | ||
117 | bp = 0; | ||
118 | #endif | ||
119 | |||
120 | dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, &info); | ||
121 | |||
122 | return info.pos; | ||
123 | } | ||
124 | |||
125 | static void timer_notify(struct pt_regs *regs, int cpu) | ||
126 | { | ||
127 | struct trace_array_cpu *data; | ||
128 | struct stack_frame frame; | ||
129 | struct trace_array *tr; | ||
130 | const void __user *fp; | ||
131 | int is_user; | ||
132 | int i; | ||
133 | |||
134 | if (!regs) | ||
135 | return; | ||
136 | |||
137 | tr = sysprof_trace; | ||
138 | data = tr->data[cpu]; | ||
139 | is_user = user_mode(regs); | ||
140 | |||
141 | if (!current || current->pid == 0) | ||
142 | return; | ||
143 | |||
144 | if (is_user && current->state != TASK_RUNNING) | ||
145 | return; | ||
146 | |||
147 | __trace_special(tr, data, 0, 0, current->pid); | ||
148 | |||
149 | if (!is_user) | ||
150 | i = trace_kernel(regs, tr, data); | ||
151 | else | ||
152 | i = 0; | ||
153 | |||
154 | /* | ||
155 | * Trace user stack if we are not a kernel thread | ||
156 | */ | ||
157 | if (current->mm && i < sample_max_depth) { | ||
158 | regs = (struct pt_regs *)current->thread.sp0 - 1; | ||
159 | |||
160 | fp = (void __user *)regs->bp; | ||
161 | |||
162 | __trace_special(tr, data, 2, regs->ip, 0); | ||
163 | |||
164 | while (i < sample_max_depth) { | ||
165 | frame.next_fp = NULL; | ||
166 | frame.return_address = 0; | ||
167 | if (!copy_stack_frame(fp, &frame)) | ||
168 | break; | ||
169 | if ((unsigned long)fp < regs->sp) | ||
170 | break; | ||
171 | |||
172 | __trace_special(tr, data, 2, frame.return_address, | ||
173 | (unsigned long)fp); | ||
174 | fp = frame.next_fp; | ||
175 | |||
176 | i++; | ||
177 | } | ||
178 | |||
179 | } | ||
180 | |||
181 | /* | ||
182 | * Special trace entry if we overflow the max depth: | ||
183 | */ | ||
184 | if (i == sample_max_depth) | ||
185 | __trace_special(tr, data, -1, -1, -1); | ||
186 | |||
187 | __trace_special(tr, data, 3, current->pid, i); | ||
188 | } | ||
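The user-stack part of timer_notify() walks saved frame pointers: each frame is a {next_fp, return_address} pair, and the loop stops when a frame cannot be copied or the walk hits the depth limit. A user-space simulation of that walk over a few synthetic frames (no real stack access, addresses invented, purely illustrative):

#include <stdio.h>
#include <stddef.h>

struct stack_frame {
	const struct stack_frame *next_fp;	/* saved caller frame pointer */
	unsigned long return_address;		/* saved return address */
};

int main(void)
{
	/* three fake frames chained the way a frame-pointer chain would be */
	struct stack_frame leaf, mid, top;

	top  = (struct stack_frame){ NULL, 0 };
	mid  = (struct stack_frame){ &top, 0xc0100200UL };
	leaf = (struct stack_frame){ &mid, 0xc0100100UL };

	const struct stack_frame *fp = &leaf;
	int depth = 0, max_depth = 512;		/* mirrors sample_max_depth */

	while (fp && depth < max_depth) {
		if (!fp->return_address)	/* stand-in for copy_stack_frame() failing */
			break;
		printf("frame %d: return address %#lx\n", depth, fp->return_address);
		fp = fp->next_fp;		/* follow the saved frame pointer */
		depth++;
	}
	return 0;
}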
189 | |||
190 | static enum hrtimer_restart stack_trace_timer_fn(struct hrtimer *hrtimer) | ||
191 | { | ||
192 | /* trace here */ | ||
193 | timer_notify(get_irq_regs(), smp_processor_id()); | ||
194 | |||
195 | hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period)); | ||
196 | |||
197 | return HRTIMER_RESTART; | ||
198 | } | ||
199 | |||
200 | static void start_stack_timer(void *unused) | ||
201 | { | ||
202 | struct hrtimer *hrtimer = &__get_cpu_var(stack_trace_hrtimer); | ||
203 | |||
204 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
205 | hrtimer->function = stack_trace_timer_fn; | ||
206 | |||
207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), | ||
208 | HRTIMER_MODE_REL_PINNED); | ||
209 | } | ||
210 | |||
211 | static void start_stack_timers(void) | ||
212 | { | ||
213 | on_each_cpu(start_stack_timer, NULL, 1); | ||
214 | } | ||
215 | |||
216 | static void stop_stack_timer(int cpu) | ||
217 | { | ||
218 | struct hrtimer *hrtimer = &per_cpu(stack_trace_hrtimer, cpu); | ||
219 | |||
220 | hrtimer_cancel(hrtimer); | ||
221 | } | ||
222 | |||
223 | static void stop_stack_timers(void) | ||
224 | { | ||
225 | int cpu; | ||
226 | |||
227 | for_each_online_cpu(cpu) | ||
228 | stop_stack_timer(cpu); | ||
229 | } | ||
230 | |||
231 | static void stop_stack_trace(struct trace_array *tr) | ||
232 | { | ||
233 | mutex_lock(&sample_timer_lock); | ||
234 | stop_stack_timers(); | ||
235 | tracer_enabled = 0; | ||
236 | mutex_unlock(&sample_timer_lock); | ||
237 | } | ||
238 | |||
239 | static int stack_trace_init(struct trace_array *tr) | ||
240 | { | ||
241 | sysprof_trace = tr; | ||
242 | |||
243 | tracing_start_cmdline_record(); | ||
244 | |||
245 | mutex_lock(&sample_timer_lock); | ||
246 | start_stack_timers(); | ||
247 | tracer_enabled = 1; | ||
248 | mutex_unlock(&sample_timer_lock); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static void stack_trace_reset(struct trace_array *tr) | ||
253 | { | ||
254 | tracing_stop_cmdline_record(); | ||
255 | stop_stack_trace(tr); | ||
256 | } | ||
257 | |||
258 | static struct tracer stack_trace __read_mostly = | ||
259 | { | ||
260 | .name = "sysprof", | ||
261 | .init = stack_trace_init, | ||
262 | .reset = stack_trace_reset, | ||
263 | #ifdef CONFIG_FTRACE_SELFTEST | ||
264 | .selftest = trace_selftest_startup_sysprof, | ||
265 | #endif | ||
266 | }; | ||
267 | |||
268 | __init static int init_stack_trace(void) | ||
269 | { | ||
270 | return register_tracer(&stack_trace); | ||
271 | } | ||
272 | device_initcall(init_stack_trace); | ||
273 | |||
274 | #define MAX_LONG_DIGITS 22 | ||
275 | |||
276 | static ssize_t | ||
277 | sysprof_sample_read(struct file *filp, char __user *ubuf, | ||
278 | size_t cnt, loff_t *ppos) | ||
279 | { | ||
280 | char buf[MAX_LONG_DIGITS]; | ||
281 | int r; | ||
282 | |||
283 | r = sprintf(buf, "%ld\n", nsecs_to_usecs(sample_period)); | ||
284 | |||
285 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
286 | } | ||
287 | |||
288 | static ssize_t | ||
289 | sysprof_sample_write(struct file *filp, const char __user *ubuf, | ||
290 | size_t cnt, loff_t *ppos) | ||
291 | { | ||
292 | char buf[MAX_LONG_DIGITS]; | ||
293 | unsigned long val; | ||
294 | |||
295 | if (cnt > MAX_LONG_DIGITS-1) | ||
296 | cnt = MAX_LONG_DIGITS-1; | ||
297 | |||
298 | if (copy_from_user(&buf, ubuf, cnt)) | ||
299 | return -EFAULT; | ||
300 | |||
301 | buf[cnt] = 0; | ||
302 | |||
303 | val = simple_strtoul(buf, NULL, 10); | ||
304 | /* | ||
305 | * Enforce a minimum sample period of 100 usecs: | ||
306 | */ | ||
307 | if (val < 100) | ||
308 | val = 100; | ||
309 | |||
310 | mutex_lock(&sample_timer_lock); | ||
311 | stop_stack_timers(); | ||
312 | sample_period = val * 1000; | ||
313 | start_stack_timers(); | ||
314 | mutex_unlock(&sample_timer_lock); | ||
315 | |||
316 | return cnt; | ||
317 | } | ||
318 | |||
319 | static const struct file_operations sysprof_sample_fops = { | ||
320 | .read = sysprof_sample_read, | ||
321 | .write = sysprof_sample_write, | ||
322 | }; | ||
323 | |||
324 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer) | ||
325 | { | ||
326 | |||
327 | trace_create_file("sysprof_sample_period", 0644, | ||
328 | d_tracer, NULL, &sysprof_sample_fops); | ||
329 | } | ||
diff --git a/kernel/watchdog.c b/kernel/watchdog.c new file mode 100644 index 000000000000..613bc1f04610 --- /dev/null +++ b/kernel/watchdog.c | |||
@@ -0,0 +1,567 @@ | |||
1 | /* | ||
2 | * Detect hard and soft lockups on a system | ||
3 | * | ||
4 | * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. | ||
5 | * | ||
6 | * this code detects hard lockups: incidents where the kernel on a CPU | ||
7 | * does not respond to anything except NMIs. | ||
8 | * | ||
9 | * Note: Most of this code is borrowed heavily from softlockup.c, | ||
10 | * so thanks to Ingo for the initial implementation. | ||
11 | * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks | ||
12 | * to those contributors as well. | ||
13 | */ | ||
14 | |||
15 | #include <linux/mm.h> | ||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/nmi.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/freezer.h> | ||
21 | #include <linux/kthread.h> | ||
22 | #include <linux/lockdep.h> | ||
23 | #include <linux/notifier.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/sysctl.h> | ||
26 | |||
27 | #include <asm/irq_regs.h> | ||
28 | #include <linux/perf_event.h> | ||
29 | |||
30 | int watchdog_enabled; | ||
31 | int __read_mostly softlockup_thresh = 60; | ||
32 | |||
33 | static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); | ||
34 | static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); | ||
35 | static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); | ||
36 | static DEFINE_PER_CPU(bool, softlockup_touch_sync); | ||
37 | static DEFINE_PER_CPU(bool, soft_watchdog_warn); | ||
38 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
39 | static DEFINE_PER_CPU(bool, hard_watchdog_warn); | ||
40 | static DEFINE_PER_CPU(bool, watchdog_nmi_touch); | ||
41 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); | ||
42 | static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); | ||
43 | static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); | ||
44 | #endif | ||
45 | |||
46 | static int __read_mostly did_panic; | ||
47 | static int __initdata no_watchdog; | ||
48 | |||
49 | |||
50 | /* boot commands */ | ||
51 | /* | ||
52 | * Should we panic when a soft-lockup or hard-lockup occurs: | ||
53 | */ | ||
54 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
55 | static int hardlockup_panic; | ||
56 | |||
57 | static int __init hardlockup_panic_setup(char *str) | ||
58 | { | ||
59 | if (!strncmp(str, "panic", 5)) | ||
60 | hardlockup_panic = 1; | ||
61 | return 1; | ||
62 | } | ||
63 | __setup("nmi_watchdog=", hardlockup_panic_setup); | ||
64 | #endif | ||
65 | |||
66 | unsigned int __read_mostly softlockup_panic = | ||
67 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | ||
68 | |||
69 | static int __init softlockup_panic_setup(char *str) | ||
70 | { | ||
71 | softlockup_panic = simple_strtoul(str, NULL, 0); | ||
72 | |||
73 | return 1; | ||
74 | } | ||
75 | __setup("softlockup_panic=", softlockup_panic_setup); | ||
76 | |||
77 | static int __init nowatchdog_setup(char *str) | ||
78 | { | ||
79 | no_watchdog = 1; | ||
80 | return 1; | ||
81 | } | ||
82 | __setup("nowatchdog", nowatchdog_setup); | ||
83 | |||
84 | /* deprecated */ | ||
85 | static int __init nosoftlockup_setup(char *str) | ||
86 | { | ||
87 | no_watchdog = 1; | ||
88 | return 1; | ||
89 | } | ||
90 | __setup("nosoftlockup", nosoftlockup_setup); | ||
91 | /* */ | ||
92 | |||
93 | |||
94 | /* | ||
95 | * Returns seconds, approximately. We don't need nanosecond | ||
96 | * resolution, and we don't need to waste time with a big divide when | ||
97 | * 2^30ns == 1.074s. | ||
98 | */ | ||
99 | static unsigned long get_timestamp(int this_cpu) | ||
100 | { | ||
101 | return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ | ||
102 | } | ||
103 | |||
104 | static unsigned long get_sample_period(void) | ||
105 | { | ||
106 | /* | ||
107 | * convert softlockup_thresh from seconds to ns | ||
108 | * the divide by 5 is to give hrtimer 5 chances to | ||
109 | * increment before the hardlockup detector generates | ||
110 | * a warning | ||
111 | */ | ||
112 | return softlockup_thresh / 5 * NSEC_PER_SEC; | ||
113 | } | ||
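get_timestamp() avoids a 64-bit divide by shifting the nanosecond clock right by 30 (2^30 ns is roughly 1.074 s), and get_sample_period() aims to fire the hrtimer five times per softlockup window. A user-space check of both calculations; all input values are invented, and note that the integer division runs before the multiply, so a threshold that is not a multiple of 5 loses its remainder:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	unsigned long long now_ns = 90ULL * NSEC_PER_SEC;	/* pretend cpu_clock() value */
	unsigned long long thresh = 60;				/* softlockup_thresh, seconds */

	/* "seconds, approximately": 2^30 ns is about 1.074 s */
	unsigned long long ts = now_ns >> 30;

	/* hrtimer period: a fifth of the threshold, converted to ns */
	unsigned long long period = thresh / 5 * NSEC_PER_SEC;

	printf("timestamp: %llu (vs %llu exact seconds)\n", ts, now_ns / NSEC_PER_SEC);
	printf("sample period: %llu ns (%llu s)\n", period, period / NSEC_PER_SEC);
	return 0;
}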
114 | |||
115 | /* Commands for resetting the watchdog */ | ||
116 | static void __touch_watchdog(void) | ||
117 | { | ||
118 | int this_cpu = smp_processor_id(); | ||
119 | |||
120 | __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); | ||
121 | } | ||
122 | |||
123 | void touch_softlockup_watchdog(void) | ||
124 | { | ||
125 | __get_cpu_var(watchdog_touch_ts) = 0; | ||
126 | } | ||
127 | EXPORT_SYMBOL(touch_softlockup_watchdog); | ||
128 | |||
129 | void touch_all_softlockup_watchdogs(void) | ||
130 | { | ||
131 | int cpu; | ||
132 | |||
133 | /* | ||
134 | * this is done lockless | ||
135 | * do we care if a 0 races with a timestamp? | ||
136 | * all it means is the softlockup check starts one cycle later | ||
137 | */ | ||
138 | for_each_online_cpu(cpu) | ||
139 | per_cpu(watchdog_touch_ts, cpu) = 0; | ||
140 | } | ||
141 | |||
142 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
143 | void touch_nmi_watchdog(void) | ||
144 | { | ||
145 | __get_cpu_var(watchdog_nmi_touch) = true; | ||
146 | touch_softlockup_watchdog(); | ||
147 | } | ||
148 | EXPORT_SYMBOL(touch_nmi_watchdog); | ||
149 | |||
150 | #endif | ||
151 | |||
152 | void touch_softlockup_watchdog_sync(void) | ||
153 | { | ||
154 | __raw_get_cpu_var(softlockup_touch_sync) = true; | ||
155 | __raw_get_cpu_var(watchdog_touch_ts) = 0; | ||
156 | } | ||
157 | |||
158 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
159 | /* watchdog detector functions */ | ||
160 | static int is_hardlockup(void) | ||
161 | { | ||
162 | unsigned long hrint = __get_cpu_var(hrtimer_interrupts); | ||
163 | |||
164 | if (__get_cpu_var(hrtimer_interrupts_saved) == hrint) | ||
165 | return 1; | ||
166 | |||
167 | __get_cpu_var(hrtimer_interrupts_saved) = hrint; | ||
168 | return 0; | ||
169 | } | ||
170 | #endif | ||
171 | |||
172 | static int is_softlockup(unsigned long touch_ts) | ||
173 | { | ||
174 | unsigned long now = get_timestamp(smp_processor_id()); | ||
175 | |||
176 | /* Warn about unreasonable delays: */ | ||
177 | if (time_after(now, touch_ts + softlockup_thresh)) | ||
178 | return now - touch_ts; | ||
179 | |||
180 | return 0; | ||
181 | } | ||
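The two detectors reduce to two comparisons: a hard lockup is suspected when the NMI sees that hrtimer_interrupts has not advanced since the previous NMI, and a soft lockup when the current timestamp has run past the last watchdog touch by more than the threshold. A small user-space simulation of those checks; every value below is invented for illustration.

#include <stdio.h>

static unsigned long hrtimer_interrupts, hrtimer_interrupts_saved;

/* hard lockup: the timer interrupt count stopped advancing between NMIs */
static int my_hardlockup_check(void)
{
	unsigned long hrint = hrtimer_interrupts;

	if (hrtimer_interrupts_saved == hrint)
		return 1;
	hrtimer_interrupts_saved = hrint;
	return 0;
}

/* soft lockup: nobody touched the watchdog timestamp for too long */
static unsigned long my_softlockup_check(unsigned long now, unsigned long touch_ts,
					 unsigned long thresh)
{
	return (now > touch_ts + thresh) ? now - touch_ts : 0;
}

int main(void)
{
	hrtimer_interrupts = 5;
	printf("NMI #1: hardlockup=%d\n", my_hardlockup_check());	/* 0: count just saved */
	/* the timer never fired again before the next NMI */
	printf("NMI #2: hardlockup=%d\n", my_hardlockup_check());	/* 1 */

	/* touched at t=100s, now t=170s, 60s threshold -> stuck for 70s */
	printf("softlockup duration: %lus\n", my_softlockup_check(170, 100, 60));
	return 0;
}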
182 | |||
183 | static int | ||
184 | watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr) | ||
185 | { | ||
186 | did_panic = 1; | ||
187 | |||
188 | return NOTIFY_DONE; | ||
189 | } | ||
190 | |||
191 | static struct notifier_block panic_block = { | ||
192 | .notifier_call = watchdog_panic, | ||
193 | }; | ||
194 | |||
195 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
196 | static struct perf_event_attr wd_hw_attr = { | ||
197 | .type = PERF_TYPE_HARDWARE, | ||
198 | .config = PERF_COUNT_HW_CPU_CYCLES, | ||
199 | .size = sizeof(struct perf_event_attr), | ||
200 | .pinned = 1, | ||
201 | .disabled = 1, | ||
202 | }; | ||
203 | |||
204 | /* Callback function for perf event subsystem */ | ||
205 | void watchdog_overflow_callback(struct perf_event *event, int nmi, | ||
206 | struct perf_sample_data *data, | ||
207 | struct pt_regs *regs) | ||
208 | { | ||
209 | if (__get_cpu_var(watchdog_nmi_touch) == true) { | ||
210 | __get_cpu_var(watchdog_nmi_touch) = false; | ||
211 | return; | ||
212 | } | ||
213 | |||
214 | /* check for a hardlockup | ||
215 | * This is done by making sure our timer interrupt | ||
216 | * is incrementing. The timer interrupt should have | ||
217 | * fired multiple times before we overflowed. If it hasn't | ||
218 | * then this is a good indication the cpu is stuck | ||
219 | */ | ||
220 | if (is_hardlockup()) { | ||
221 | int this_cpu = smp_processor_id(); | ||
222 | |||
223 | /* only print hardlockups once */ | ||
224 | if (__get_cpu_var(hard_watchdog_warn) == true) | ||
225 | return; | ||
226 | |||
227 | if (hardlockup_panic) | ||
228 | panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); | ||
229 | else | ||
230 | WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); | ||
231 | |||
232 | __get_cpu_var(hard_watchdog_warn) = true; | ||
233 | return; | ||
234 | } | ||
235 | |||
236 | __get_cpu_var(hard_watchdog_warn) = false; | ||
237 | return; | ||
238 | } | ||
239 | static void watchdog_interrupt_count(void) | ||
240 | { | ||
241 | __get_cpu_var(hrtimer_interrupts)++; | ||
242 | } | ||
243 | #else | ||
244 | static inline void watchdog_interrupt_count(void) { return; } | ||
245 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ | ||
246 | |||
247 | /* watchdog kicker functions */ | ||
248 | static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) | ||
249 | { | ||
250 | unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts); | ||
251 | struct pt_regs *regs = get_irq_regs(); | ||
252 | int duration; | ||
253 | |||
254 | /* kick the hardlockup detector */ | ||
255 | watchdog_interrupt_count(); | ||
256 | |||
257 | /* kick the softlockup detector */ | ||
258 | wake_up_process(__get_cpu_var(softlockup_watchdog)); | ||
259 | |||
260 | /* .. and repeat */ | ||
261 | hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); | ||
262 | |||
263 | if (touch_ts == 0) { | ||
264 | if (unlikely(__get_cpu_var(softlockup_touch_sync))) { | ||
265 | /* | ||
266 | * If the time stamp was touched atomically | ||
267 | * make sure the scheduler tick is up to date. | ||
268 | */ | ||
269 | __get_cpu_var(softlockup_touch_sync) = false; | ||
270 | sched_clock_tick(); | ||
271 | } | ||
272 | __touch_watchdog(); | ||
273 | return HRTIMER_RESTART; | ||
274 | } | ||
275 | |||
276 | /* check for a softlockup | ||
277 | * This is done by making sure a high priority task is | ||
278 | * being scheduled. The task touches the watchdog to | ||
279 | * indicate it is getting cpu time. If it hasn't then | ||
280 | * this is a good indication some task is hogging the cpu | ||
281 | */ | ||
282 | duration = is_softlockup(touch_ts); | ||
283 | if (unlikely(duration)) { | ||
284 | /* only warn once */ | ||
285 | if (__get_cpu_var(soft_watchdog_warn) == true) | ||
286 | return HRTIMER_RESTART; | ||
287 | |||
288 | printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", | ||
289 | smp_processor_id(), duration, | ||
290 | current->comm, task_pid_nr(current)); | ||
291 | print_modules(); | ||
292 | print_irqtrace_events(current); | ||
293 | if (regs) | ||
294 | show_regs(regs); | ||
295 | else | ||
296 | dump_stack(); | ||
297 | |||
298 | if (softlockup_panic) | ||
299 | panic("softlockup: hung tasks"); | ||
300 | __get_cpu_var(soft_watchdog_warn) = true; | ||
301 | } else | ||
302 | __get_cpu_var(soft_watchdog_warn) = false; | ||
303 | |||
304 | return HRTIMER_RESTART; | ||
305 | } | ||
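watchdog_timer_fn() is the standard self-rearming hrtimer shape: do the per-tick work, push the expiry forward with hrtimer_forward_now(), and return HRTIMER_RESTART. A stripped-down kernel-style sketch of just that skeleton; my_timer_fn, my_timer_setup and my_period_ns are placeholders, not names from this patch.

/* sketch: the self-rearming hrtimer skeleton used by the watchdog above */
static enum hrtimer_restart my_timer_fn(struct hrtimer *hrtimer)
{
	/* ... per-tick work: kick the detectors, wake the kthread ... */

	/* re-arm relative to now and keep the timer running */
	hrtimer_forward_now(hrtimer, ns_to_ktime(my_period_ns));
	return HRTIMER_RESTART;
}

static void my_timer_setup(struct hrtimer *hrtimer)
{
	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = my_timer_fn;
	/* pinned so the timer stays on the CPU it was started on */
	hrtimer_start(hrtimer, ns_to_ktime(my_period_ns), HRTIMER_MODE_REL_PINNED);
}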
306 | |||
307 | |||
308 | /* | ||
309 | * The watchdog thread - touches the timestamp. | ||
310 | */ | ||
311 | static int watchdog(void *unused) | ||
312 | { | ||
313 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | ||
314 | struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); | ||
315 | |||
316 | sched_setscheduler(current, SCHED_FIFO, ¶m); | ||
317 | |||
318 | /* initialize timestamp */ | ||
319 | __touch_watchdog(); | ||
320 | |||
321 | /* kick off the timer for the hardlockup detector */ | ||
322 | /* done here because hrtimer_start can only pin to smp_processor_id() */ | ||
323 | hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()), | ||
324 | HRTIMER_MODE_REL_PINNED); | ||
325 | |||
326 | set_current_state(TASK_INTERRUPTIBLE); | ||
327 | /* | ||
328 | * Run briefly once per second to reset the softlockup timestamp. | ||
329 | * If this gets delayed for more than 60 seconds then the | ||
330 | * debug-printout triggers in watchdog_timer_fn(). | ||
331 | */ | ||
332 | while (!kthread_should_stop()) { | ||
333 | __touch_watchdog(); | ||
334 | schedule(); | ||
335 | |||
336 | if (kthread_should_stop()) | ||
337 | break; | ||
338 | |||
339 | set_current_state(TASK_INTERRUPTIBLE); | ||
340 | } | ||
341 | __set_current_state(TASK_RUNNING); | ||
342 | |||
343 | return 0; | ||
344 | } | ||
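The watchdog thread follows the usual per-CPU kthread lifecycle that watchdog_enable()/watchdog_disable() further down drive: create, bind to the CPU, wake, and eventually kthread_stop(). A condensed kernel-style sketch of the loop and its termination check; my_thread_fn and my_work() are placeholders.

/* sketch of the kthread loop pattern used by watchdog() above */
static int my_thread_fn(void *unused)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		my_work();			/* e.g. __touch_watchdog() */
		schedule();			/* sleep until woken by the timer */

		if (kthread_should_stop())	/* re-check after waking */
			break;
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}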
345 | |||
346 | |||
347 | #ifdef CONFIG_HARDLOCKUP_DETECTOR | ||
348 | static int watchdog_nmi_enable(int cpu) | ||
349 | { | ||
350 | struct perf_event_attr *wd_attr; | ||
351 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
352 | |||
353 | /* is it already setup and enabled? */ | ||
354 | if (event && event->state > PERF_EVENT_STATE_OFF) | ||
355 | goto out; | ||
356 | |||
357 | /* it is setup but not enabled */ | ||
358 | if (event != NULL) | ||
359 | goto out_enable; | ||
360 | |||
361 | /* Try to register using hardware perf events */ | ||
362 | wd_attr = &wd_hw_attr; | ||
363 | wd_attr->sample_period = hw_nmi_get_sample_period(); | ||
364 | event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback); | ||
365 | if (!IS_ERR(event)) { | ||
366 | printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); | ||
367 | goto out_save; | ||
368 | } | ||
369 | |||
370 | printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); | ||
371 | return -1; | ||
372 | |||
373 | /* success path */ | ||
374 | out_save: | ||
375 | per_cpu(watchdog_ev, cpu) = event; | ||
376 | out_enable: | ||
377 | perf_event_enable(per_cpu(watchdog_ev, cpu)); | ||
378 | out: | ||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | static void watchdog_nmi_disable(int cpu) | ||
383 | { | ||
384 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | ||
385 | |||
386 | if (event) { | ||
387 | perf_event_disable(event); | ||
388 | per_cpu(watchdog_ev, cpu) = NULL; | ||
389 | |||
390 | /* should be in cleanup, but blocks oprofile */ | ||
391 | perf_event_release_kernel(event); | ||
392 | } | ||
393 | return; | ||
394 | } | ||
395 | #else | ||
396 | static int watchdog_nmi_enable(int cpu) { return 0; } | ||
397 | static void watchdog_nmi_disable(int cpu) { return; } | ||
398 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ | ||
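On this kernel the hard-lockup detector is just a pinned, cycle-counting perf event whose overflow handler runs from NMI context. A condensed kernel-style sketch of the enable path above, with error handling trimmed; the four-argument perf_event_create_kernel_counter() is the form this patch uses, and my_wd_attr/my_nmi_counter are made-up names.

/* sketch of watchdog_nmi_enable() above, error handling trimmed */
static struct perf_event_attr my_wd_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.pinned		= 1,		/* must stay on the PMU */
	.disabled	= 1,		/* armed explicitly below */
};

static struct perf_event *my_nmi_counter(int cpu)
{
	struct perf_event *event;

	my_wd_attr.sample_period = hw_nmi_get_sample_period();
	event = perf_event_create_kernel_counter(&my_wd_attr, cpu, -1,
						 watchdog_overflow_callback);
	if (!IS_ERR(event))
		perf_event_enable(event);
	return event;			/* ERR_PTR() value on failure */
}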
399 | |||
400 | /* prepare/enable/disable routines */ | ||
401 | static int watchdog_prepare_cpu(int cpu) | ||
402 | { | ||
403 | struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); | ||
404 | |||
405 | WARN_ON(per_cpu(softlockup_watchdog, cpu)); | ||
406 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | ||
407 | hrtimer->function = watchdog_timer_fn; | ||
408 | |||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | static int watchdog_enable(int cpu) | ||
413 | { | ||
414 | struct task_struct *p = per_cpu(softlockup_watchdog, cpu); | ||
415 | |||
416 | /* enable the perf event */ | ||
417 | if (watchdog_nmi_enable(cpu) != 0) | ||
418 | return -1; | ||
419 | |||
420 | /* create the watchdog thread */ | ||
421 | if (!p) { | ||
422 | p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); | ||
423 | if (IS_ERR(p)) { | ||
424 | printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); | ||
425 | return -1; | ||
426 | } | ||
427 | kthread_bind(p, cpu); | ||
428 | per_cpu(watchdog_touch_ts, cpu) = 0; | ||
429 | per_cpu(softlockup_watchdog, cpu) = p; | ||
430 | wake_up_process(p); | ||
431 | } | ||
432 | |||
433 | return 0; | ||
434 | } | ||
435 | |||
436 | static void watchdog_disable(int cpu) | ||
437 | { | ||
438 | struct task_struct *p = per_cpu(softlockup_watchdog, cpu); | ||
439 | struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); | ||
440 | |||
441 | /* | ||
442 | * cancel the timer first to stop incrementing the stats | ||
443 | * and waking up the kthread | ||
444 | */ | ||
445 | hrtimer_cancel(hrtimer); | ||
446 | |||
447 | /* disable the perf event */ | ||
448 | watchdog_nmi_disable(cpu); | ||
449 | |||
450 | /* stop the watchdog thread */ | ||
451 | if (p) { | ||
452 | per_cpu(softlockup_watchdog, cpu) = NULL; | ||
453 | kthread_stop(p); | ||
454 | } | ||
455 | |||
456 | /* if any cpu succeeds, watchdog is considered enabled for the system */ | ||
457 | watchdog_enabled = 1; | ||
458 | } | ||
459 | |||
460 | static void watchdog_enable_all_cpus(void) | ||
461 | { | ||
462 | int cpu; | ||
463 | int result = 0; | ||
464 | |||
465 | for_each_online_cpu(cpu) | ||
466 | result += watchdog_enable(cpu); | ||
467 | |||
468 | if (result) | ||
469 | printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); | ||
470 | |||
471 | } | ||
472 | |||
473 | static void watchdog_disable_all_cpus(void) | ||
474 | { | ||
475 | int cpu; | ||
476 | |||
477 | for_each_online_cpu(cpu) | ||
478 | watchdog_disable(cpu); | ||
479 | |||
480 | /* if all watchdogs are disabled, then they are disabled for the system */ | ||
481 | watchdog_enabled = 0; | ||
482 | } | ||
483 | |||
484 | |||
485 | /* sysctl functions */ | ||
486 | #ifdef CONFIG_SYSCTL | ||
487 | /* | ||
488 | * proc handler for /proc/sys/kernel/nmi_watchdog | ||
489 | */ | ||
490 | |||
491 | int proc_dowatchdog_enabled(struct ctl_table *table, int write, | ||
492 | void __user *buffer, size_t *length, loff_t *ppos) | ||
493 | { | ||
494 | proc_dointvec(table, write, buffer, length, ppos); | ||
495 | |||
496 | if (watchdog_enabled) | ||
497 | watchdog_enable_all_cpus(); | ||
498 | else | ||
499 | watchdog_disable_all_cpus(); | ||
500 | return 0; | ||
501 | } | ||
502 | |||
503 | int proc_dowatchdog_thresh(struct ctl_table *table, int write, | ||
504 | void __user *buffer, | ||
505 | size_t *lenp, loff_t *ppos) | ||
506 | { | ||
507 | return proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
508 | } | ||
509 | #endif /* CONFIG_SYSCTL */ | ||
510 | |||
511 | |||
512 | /* | ||
513 | * Create/destroy watchdog threads as CPUs come and go: | ||
514 | */ | ||
515 | static int __cpuinit | ||
516 | cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
517 | { | ||
518 | int hotcpu = (unsigned long)hcpu; | ||
519 | |||
520 | switch (action) { | ||
521 | case CPU_UP_PREPARE: | ||
522 | case CPU_UP_PREPARE_FROZEN: | ||
523 | if (watchdog_prepare_cpu(hotcpu)) | ||
524 | return NOTIFY_BAD; | ||
525 | break; | ||
526 | case CPU_ONLINE: | ||
527 | case CPU_ONLINE_FROZEN: | ||
528 | if (watchdog_enable(hotcpu)) | ||
529 | return NOTIFY_BAD; | ||
530 | break; | ||
531 | #ifdef CONFIG_HOTPLUG_CPU | ||
532 | case CPU_UP_CANCELED: | ||
533 | case CPU_UP_CANCELED_FROZEN: | ||
534 | watchdog_disable(hotcpu); | ||
535 | break; | ||
536 | case CPU_DEAD: | ||
537 | case CPU_DEAD_FROZEN: | ||
538 | watchdog_disable(hotcpu); | ||
539 | break; | ||
540 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
541 | } | ||
542 | return NOTIFY_OK; | ||
543 | } | ||
544 | |||
545 | static struct notifier_block __cpuinitdata cpu_nfb = { | ||
546 | .notifier_call = cpu_callback | ||
547 | }; | ||
548 | |||
549 | static int __init spawn_watchdog_task(void) | ||
550 | { | ||
551 | void *cpu = (void *)(long)smp_processor_id(); | ||
552 | int err; | ||
553 | |||
554 | if (no_watchdog) | ||
555 | return 0; | ||
556 | |||
557 | err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); | ||
558 | WARN_ON(err == NOTIFY_BAD); | ||
559 | |||
560 | cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); | ||
561 | register_cpu_notifier(&cpu_nfb); | ||
562 | |||
563 | atomic_notifier_chain_register(&panic_notifier_list, &panic_block); | ||
564 | |||
565 | return 0; | ||
566 | } | ||
567 | early_initcall(spawn_watchdog_task); | ||