Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c                         |   2
-rw-r--r--  kernel/futex.c                        |  10
-rw-r--r--  kernel/hw_breakpoint.c                | 146
-rw-r--r--  kernel/kgdb.c                         |  56
-rw-r--r--  kernel/lockdep.c                      |  16
-rw-r--r--  kernel/perf_event.c                   |  75
-rw-r--r--  kernel/resource.c                     |  26
-rw-r--r--  kernel/trace/trace.c                  |  57
-rw-r--r--  kernel/trace/trace.h                  |   2
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 165
-rw-r--r--  kernel/trace/trace_kprobe.c           |  41
-rw-r--r--  kernel/trace/trace_ksym.c             |   5
-rw-r--r--  kernel/trace/trace_output.c           |  75
-rw-r--r--  kernel/workqueue.c                    | 131
14 files changed, 604 insertions(+), 203 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 1143012951e9..6f50ef55a6f3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -971,7 +971,7 @@ NORET_TYPE void do_exit(long code) | |||
971 | exit_thread(); | 971 | exit_thread(); |
972 | cgroup_exit(tsk, 1); | 972 | cgroup_exit(tsk, 1); |
973 | 973 | ||
974 | if (group_dead && tsk->signal->leader) | 974 | if (group_dead) |
975 | disassociate_ctty(1); | 975 | disassociate_ctty(1); |
976 | 976 | ||
977 | module_put(task_thread_info(tsk)->exec_domain->module); | 977 | module_put(task_thread_info(tsk)->exec_domain->module); |
diff --git a/kernel/futex.c b/kernel/futex.c
index fb65e822fc41..d73ef1f3e55d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -304,8 +304,14 @@ void put_futex_key(int fshared, union futex_key *key) | |||
304 | */ | 304 | */ |
305 | static int fault_in_user_writeable(u32 __user *uaddr) | 305 | static int fault_in_user_writeable(u32 __user *uaddr) |
306 | { | 306 | { |
307 | int ret = get_user_pages(current, current->mm, (unsigned long)uaddr, | 307 | struct mm_struct *mm = current->mm; |
308 | 1, 1, 0, NULL, NULL); | 308 | int ret; |
309 | |||
310 | down_read(&mm->mmap_sem); | ||
311 | ret = get_user_pages(current, mm, (unsigned long)uaddr, | ||
312 | 1, 1, 0, NULL, NULL); | ||
313 | up_read(&mm->mmap_sem); | ||
314 | |||
309 | return ret < 0 ? ret : 0; | 315 | return ret < 0 ? ret : 0; |
310 | } | 316 | } |
311 | 317 | ||
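For context on the futex.c hunk above: get_user_pages() must be called with the caller's mmap_sem held for reading, which is exactly what the fix adds around the fault-in path. A minimal sketch of that pattern, assuming the 2.6.32-era eight-argument get_user_pages(); the helper name here is illustrative, not taken from the patch:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/types.h>

/* Fault in a single user page for writing; same shape as the fix above. */
static int fault_in_writeable_page(u32 __user *uaddr)
{
	struct mm_struct *mm = current->mm;
	int ret;

	down_read(&mm->mmap_sem);	/* get_user_pages() requires mmap_sem to be held */
	ret = get_user_pages(current, mm, (unsigned long)uaddr,
			     1, 1, 0, NULL, NULL);	/* one page, write access, no force */
	up_read(&mm->mmap_sem);

	return ret < 0 ? ret : 0;	/* a positive count means the page is now present */
}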
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index cf5ee1628411..366eedf949c0 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -52,7 +52,7 @@ | |||
52 | static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned); | 52 | static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned); |
53 | 53 | ||
54 | /* Number of pinned task breakpoints in a cpu */ | 54 | /* Number of pinned task breakpoints in a cpu */ |
55 | static DEFINE_PER_CPU(unsigned int, task_bp_pinned[HBP_NUM]); | 55 | static DEFINE_PER_CPU(unsigned int, nr_task_bp_pinned[HBP_NUM]); |
56 | 56 | ||
57 | /* Number of non-pinned cpu/task breakpoints in a cpu */ | 57 | /* Number of non-pinned cpu/task breakpoints in a cpu */ |
58 | static DEFINE_PER_CPU(unsigned int, nr_bp_flexible); | 58 | static DEFINE_PER_CPU(unsigned int, nr_bp_flexible); |
@@ -73,7 +73,7 @@ static DEFINE_MUTEX(nr_bp_mutex); | |||
73 | static unsigned int max_task_bp_pinned(int cpu) | 73 | static unsigned int max_task_bp_pinned(int cpu) |
74 | { | 74 | { |
75 | int i; | 75 | int i; |
76 | unsigned int *tsk_pinned = per_cpu(task_bp_pinned, cpu); | 76 | unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned, cpu); |
77 | 77 | ||
78 | for (i = HBP_NUM -1; i >= 0; i--) { | 78 | for (i = HBP_NUM -1; i >= 0; i--) { |
79 | if (tsk_pinned[i] > 0) | 79 | if (tsk_pinned[i] > 0) |
@@ -83,15 +83,51 @@ static unsigned int max_task_bp_pinned(int cpu) | |||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | static int task_bp_pinned(struct task_struct *tsk) | ||
87 | { | ||
88 | struct perf_event_context *ctx = tsk->perf_event_ctxp; | ||
89 | struct list_head *list; | ||
90 | struct perf_event *bp; | ||
91 | unsigned long flags; | ||
92 | int count = 0; | ||
93 | |||
94 | if (WARN_ONCE(!ctx, "No perf context for this task")) | ||
95 | return 0; | ||
96 | |||
97 | list = &ctx->event_list; | ||
98 | |||
99 | spin_lock_irqsave(&ctx->lock, flags); | ||
100 | |||
101 | /* | ||
102 | * The current breakpoint counter is not included in the list | ||
103 | * at the open() callback time | ||
104 | */ | ||
105 | list_for_each_entry(bp, list, event_entry) { | ||
106 | if (bp->attr.type == PERF_TYPE_BREAKPOINT) | ||
107 | count++; | ||
108 | } | ||
109 | |||
110 | spin_unlock_irqrestore(&ctx->lock, flags); | ||
111 | |||
112 | return count; | ||
113 | } | ||
114 | |||
86 | /* | 115 | /* |
87 | * Report the number of pinned/un-pinned breakpoints we have in | 116 | * Report the number of pinned/un-pinned breakpoints we have in |
88 | * a given cpu (cpu > -1) or in all of them (cpu = -1). | 117 | * a given cpu (cpu > -1) or in all of them (cpu = -1). |
89 | */ | 118 | */ |
90 | static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu) | 119 | static void |
120 | fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp) | ||
91 | { | 121 | { |
122 | int cpu = bp->cpu; | ||
123 | struct task_struct *tsk = bp->ctx->task; | ||
124 | |||
92 | if (cpu >= 0) { | 125 | if (cpu >= 0) { |
93 | slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu); | 126 | slots->pinned = per_cpu(nr_cpu_bp_pinned, cpu); |
94 | slots->pinned += max_task_bp_pinned(cpu); | 127 | if (!tsk) |
128 | slots->pinned += max_task_bp_pinned(cpu); | ||
129 | else | ||
130 | slots->pinned += task_bp_pinned(tsk); | ||
95 | slots->flexible = per_cpu(nr_bp_flexible, cpu); | 131 | slots->flexible = per_cpu(nr_bp_flexible, cpu); |
96 | 132 | ||
97 | return; | 133 | return; |
@@ -101,7 +137,10 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu) | |||
101 | unsigned int nr; | 137 | unsigned int nr; |
102 | 138 | ||
103 | nr = per_cpu(nr_cpu_bp_pinned, cpu); | 139 | nr = per_cpu(nr_cpu_bp_pinned, cpu); |
104 | nr += max_task_bp_pinned(cpu); | 140 | if (!tsk) |
141 | nr += max_task_bp_pinned(cpu); | ||
142 | else | ||
143 | nr += task_bp_pinned(tsk); | ||
105 | 144 | ||
106 | if (nr > slots->pinned) | 145 | if (nr > slots->pinned) |
107 | slots->pinned = nr; | 146 | slots->pinned = nr; |
@@ -118,35 +157,12 @@ static void fetch_bp_busy_slots(struct bp_busy_slots *slots, int cpu) | |||
118 | */ | 157 | */ |
119 | static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable) | 158 | static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable) |
120 | { | 159 | { |
121 | int count = 0; | ||
122 | struct perf_event *bp; | ||
123 | struct perf_event_context *ctx = tsk->perf_event_ctxp; | ||
124 | unsigned int *tsk_pinned; | 160 | unsigned int *tsk_pinned; |
125 | struct list_head *list; | 161 | int count = 0; |
126 | unsigned long flags; | ||
127 | |||
128 | if (WARN_ONCE(!ctx, "No perf context for this task")) | ||
129 | return; | ||
130 | |||
131 | list = &ctx->event_list; | ||
132 | |||
133 | spin_lock_irqsave(&ctx->lock, flags); | ||
134 | |||
135 | /* | ||
136 | * The current breakpoint counter is not included in the list | ||
137 | * at the open() callback time | ||
138 | */ | ||
139 | list_for_each_entry(bp, list, event_entry) { | ||
140 | if (bp->attr.type == PERF_TYPE_BREAKPOINT) | ||
141 | count++; | ||
142 | } | ||
143 | 162 | ||
144 | spin_unlock_irqrestore(&ctx->lock, flags); | 163 | count = task_bp_pinned(tsk); |
145 | 164 | ||
146 | if (WARN_ONCE(count < 0, "No breakpoint counter found in the counter list")) | 165 | tsk_pinned = per_cpu(nr_task_bp_pinned, cpu); |
147 | return; | ||
148 | |||
149 | tsk_pinned = per_cpu(task_bp_pinned, cpu); | ||
150 | if (enable) { | 166 | if (enable) { |
151 | tsk_pinned[count]++; | 167 | tsk_pinned[count]++; |
152 | if (count > 0) | 168 | if (count > 0) |
@@ -193,7 +209,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable) | |||
193 | * - If attached to a single cpu, check: | 209 | * - If attached to a single cpu, check: |
194 | * | 210 | * |
195 | * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) | 211 | * (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu) |
196 | * + max(per_cpu(task_bp_pinned, cpu)))) < HBP_NUM | 212 | * + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM |
197 | * | 213 | * |
198 | * -> If there are already non-pinned counters in this cpu, it means | 214 | * -> If there are already non-pinned counters in this cpu, it means |
199 | * there is already a free slot for them. | 215 | * there is already a free slot for them. |
@@ -204,7 +220,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable) | |||
204 | * - If attached to every cpus, check: | 220 | * - If attached to every cpus, check: |
205 | * | 221 | * |
206 | * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) | 222 | * (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *)) |
207 | * + max(per_cpu(task_bp_pinned, *)))) < HBP_NUM | 223 | * + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM |
208 | * | 224 | * |
209 | * -> This is roughly the same, except we check the number of per cpu | 225 | * -> This is roughly the same, except we check the number of per cpu |
210 | * bp for every cpu and we keep the max one. Same for the per tasks | 226 | * bp for every cpu and we keep the max one. Same for the per tasks |
@@ -216,7 +232,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable) | |||
216 | * - If attached to a single cpu, check: | 232 | * - If attached to a single cpu, check: |
217 | * | 233 | * |
218 | * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) | 234 | * ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu) |
219 | * + max(per_cpu(task_bp_pinned, cpu))) < HBP_NUM | 235 | * + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM |
220 | * | 236 | * |
221 | * -> Same checks as before. But now the nr_bp_flexible, if any, must keep | 237 | * -> Same checks as before. But now the nr_bp_flexible, if any, must keep |
222 | * one register at least (or they will never be fed). | 238 | * one register at least (or they will never be fed). |
@@ -224,7 +240,7 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable) | |||
224 | * - If attached to every cpus, check: | 240 | * - If attached to every cpus, check: |
225 | * | 241 | * |
226 | * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) | 242 | * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *)) |
227 | * + max(per_cpu(task_bp_pinned, *))) < HBP_NUM | 243 | * + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM |
228 | */ | 244 | */ |
229 | int reserve_bp_slot(struct perf_event *bp) | 245 | int reserve_bp_slot(struct perf_event *bp) |
230 | { | 246 | { |
@@ -233,7 +249,7 @@ int reserve_bp_slot(struct perf_event *bp) | |||
233 | 249 | ||
234 | mutex_lock(&nr_bp_mutex); | 250 | mutex_lock(&nr_bp_mutex); |
235 | 251 | ||
236 | fetch_bp_busy_slots(&slots, bp->cpu); | 252 | fetch_bp_busy_slots(&slots, bp); |
237 | 253 | ||
238 | /* Flexible counters need to keep at least one slot */ | 254 | /* Flexible counters need to keep at least one slot */ |
239 | if (slots.pinned + (!!slots.flexible) == HBP_NUM) { | 255 | if (slots.pinned + (!!slots.flexible) == HBP_NUM) { |
@@ -259,7 +275,7 @@ void release_bp_slot(struct perf_event *bp) | |||
259 | } | 275 | } |
260 | 276 | ||
261 | 277 | ||
262 | int __register_perf_hw_breakpoint(struct perf_event *bp) | 278 | int register_perf_hw_breakpoint(struct perf_event *bp) |
263 | { | 279 | { |
264 | int ret; | 280 | int ret; |
265 | 281 | ||
@@ -276,19 +292,12 @@ int __register_perf_hw_breakpoint(struct perf_event *bp) | |||
276 | * This is a quick hack that will be removed soon, once we remove | 292 | * This is a quick hack that will be removed soon, once we remove |
277 | * the tmp breakpoints from ptrace | 293 | * the tmp breakpoints from ptrace |
278 | */ | 294 | */ |
279 | if (!bp->attr.disabled || bp->callback == perf_bp_event) | 295 | if (!bp->attr.disabled || !bp->overflow_handler) |
280 | ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task); | 296 | ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task); |
281 | 297 | ||
282 | return ret; | 298 | return ret; |
283 | } | 299 | } |
284 | 300 | ||
285 | int register_perf_hw_breakpoint(struct perf_event *bp) | ||
286 | { | ||
287 | bp->callback = perf_bp_event; | ||
288 | |||
289 | return __register_perf_hw_breakpoint(bp); | ||
290 | } | ||
291 | |||
292 | /** | 301 | /** |
293 | * register_user_hw_breakpoint - register a hardware breakpoint for user space | 302 | * register_user_hw_breakpoint - register a hardware breakpoint for user space |
294 | * @attr: breakpoint attributes | 303 | * @attr: breakpoint attributes |
@@ -297,7 +306,7 @@ int register_perf_hw_breakpoint(struct perf_event *bp) | |||
297 | */ | 306 | */ |
298 | struct perf_event * | 307 | struct perf_event * |
299 | register_user_hw_breakpoint(struct perf_event_attr *attr, | 308 | register_user_hw_breakpoint(struct perf_event_attr *attr, |
300 | perf_callback_t triggered, | 309 | perf_overflow_handler_t triggered, |
301 | struct task_struct *tsk) | 310 | struct task_struct *tsk) |
302 | { | 311 | { |
303 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); | 312 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); |
@@ -311,19 +320,40 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint); | |||
311 | * @triggered: callback to trigger when we hit the breakpoint | 320 | * @triggered: callback to trigger when we hit the breakpoint |
312 | * @tsk: pointer to 'task_struct' of the process to which the address belongs | 321 | * @tsk: pointer to 'task_struct' of the process to which the address belongs |
313 | */ | 322 | */ |
314 | struct perf_event * | 323 | int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr) |
315 | modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr, | ||
316 | perf_callback_t triggered, | ||
317 | struct task_struct *tsk) | ||
318 | { | 324 | { |
319 | /* | 325 | u64 old_addr = bp->attr.bp_addr; |
320 | * FIXME: do it without unregistering | 326 | int old_type = bp->attr.bp_type; |
321 | * - We don't want to lose our slot | 327 | int old_len = bp->attr.bp_len; |
322 | * - If the new bp is incorrect, don't lose the older one | 328 | int err = 0; |
323 | */ | ||
324 | unregister_hw_breakpoint(bp); | ||
325 | 329 | ||
326 | return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered); | 330 | perf_event_disable(bp); |
331 | |||
332 | bp->attr.bp_addr = attr->bp_addr; | ||
333 | bp->attr.bp_type = attr->bp_type; | ||
334 | bp->attr.bp_len = attr->bp_len; | ||
335 | |||
336 | if (attr->disabled) | ||
337 | goto end; | ||
338 | |||
339 | err = arch_validate_hwbkpt_settings(bp, bp->ctx->task); | ||
340 | if (!err) | ||
341 | perf_event_enable(bp); | ||
342 | |||
343 | if (err) { | ||
344 | bp->attr.bp_addr = old_addr; | ||
345 | bp->attr.bp_type = old_type; | ||
346 | bp->attr.bp_len = old_len; | ||
347 | if (!bp->attr.disabled) | ||
348 | perf_event_enable(bp); | ||
349 | |||
350 | return err; | ||
351 | } | ||
352 | |||
353 | end: | ||
354 | bp->attr.disabled = attr->disabled; | ||
355 | |||
356 | return 0; | ||
327 | } | 357 | } |
328 | EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); | 358 | EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint); |
329 | 359 | ||
@@ -348,7 +378,7 @@ EXPORT_SYMBOL_GPL(unregister_hw_breakpoint); | |||
348 | */ | 378 | */ |
349 | struct perf_event ** | 379 | struct perf_event ** |
350 | register_wide_hw_breakpoint(struct perf_event_attr *attr, | 380 | register_wide_hw_breakpoint(struct perf_event_attr *attr, |
351 | perf_callback_t triggered) | 381 | perf_overflow_handler_t triggered) |
352 | { | 382 | { |
353 | struct perf_event **cpu_events, **pevent, *bp; | 383 | struct perf_event **cpu_events, **pevent, *bp; |
354 | long err; | 384 | long err; |
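The hw_breakpoint.c changes above turn modify_user_hw_breakpoint() into an in-place update: it now takes only the event and the new attributes, returns an error code, and rolls back to the old address, type and length if the new settings do not validate. A hypothetical caller could then look roughly like this; the helper name and the single-field update are made up for illustration:

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/* Move an existing breakpoint to a new address without losing its slot. */
static int move_breakpoint(struct perf_event *bp, unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;	/* start from the current settings */

	attr.bp_addr = new_addr;		/* change only the watched address */

	/*
	 * On failure the old bp_addr/bp_type/bp_len are restored and the
	 * event keeps its previous enabled/disabled state.
	 */
	return modify_user_hw_breakpoint(bp, &attr);
}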
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 7d7014634022..2eb517e23514 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -129,6 +129,7 @@ struct task_struct *kgdb_usethread; | |||
129 | struct task_struct *kgdb_contthread; | 129 | struct task_struct *kgdb_contthread; |
130 | 130 | ||
131 | int kgdb_single_step; | 131 | int kgdb_single_step; |
132 | pid_t kgdb_sstep_pid; | ||
132 | 133 | ||
133 | /* Our I/O buffers. */ | 134 | /* Our I/O buffers. */ |
134 | static char remcom_in_buffer[BUFMAX]; | 135 | static char remcom_in_buffer[BUFMAX]; |
@@ -541,12 +542,17 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid) | |||
541 | */ | 542 | */ |
542 | if (tid == 0 || tid == -1) | 543 | if (tid == 0 || tid == -1) |
543 | tid = -atomic_read(&kgdb_active) - 2; | 544 | tid = -atomic_read(&kgdb_active) - 2; |
544 | if (tid < 0) { | 545 | if (tid < -1 && tid > -NR_CPUS - 2) { |
545 | if (kgdb_info[-tid - 2].task) | 546 | if (kgdb_info[-tid - 2].task) |
546 | return kgdb_info[-tid - 2].task; | 547 | return kgdb_info[-tid - 2].task; |
547 | else | 548 | else |
548 | return idle_task(-tid - 2); | 549 | return idle_task(-tid - 2); |
549 | } | 550 | } |
551 | if (tid <= 0) { | ||
552 | printk(KERN_ERR "KGDB: Internal thread select error\n"); | ||
553 | dump_stack(); | ||
554 | return NULL; | ||
555 | } | ||
550 | 556 | ||
551 | /* | 557 | /* |
552 | * find_task_by_pid_ns() does not take the tasklist lock anymore | 558 | * find_task_by_pid_ns() does not take the tasklist lock anymore |
@@ -619,7 +625,8 @@ static void kgdb_flush_swbreak_addr(unsigned long addr) | |||
619 | static int kgdb_activate_sw_breakpoints(void) | 625 | static int kgdb_activate_sw_breakpoints(void) |
620 | { | 626 | { |
621 | unsigned long addr; | 627 | unsigned long addr; |
622 | int error = 0; | 628 | int error; |
629 | int ret = 0; | ||
623 | int i; | 630 | int i; |
624 | 631 | ||
625 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 632 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
@@ -629,13 +636,16 @@ static int kgdb_activate_sw_breakpoints(void) | |||
629 | addr = kgdb_break[i].bpt_addr; | 636 | addr = kgdb_break[i].bpt_addr; |
630 | error = kgdb_arch_set_breakpoint(addr, | 637 | error = kgdb_arch_set_breakpoint(addr, |
631 | kgdb_break[i].saved_instr); | 638 | kgdb_break[i].saved_instr); |
632 | if (error) | 639 | if (error) { |
633 | return error; | 640 | ret = error; |
641 | printk(KERN_INFO "KGDB: BP install failed: %lx", addr); | ||
642 | continue; | ||
643 | } | ||
634 | 644 | ||
635 | kgdb_flush_swbreak_addr(addr); | 645 | kgdb_flush_swbreak_addr(addr); |
636 | kgdb_break[i].state = BP_ACTIVE; | 646 | kgdb_break[i].state = BP_ACTIVE; |
637 | } | 647 | } |
638 | return 0; | 648 | return ret; |
639 | } | 649 | } |
640 | 650 | ||
641 | static int kgdb_set_sw_break(unsigned long addr) | 651 | static int kgdb_set_sw_break(unsigned long addr) |
@@ -682,7 +692,8 @@ static int kgdb_set_sw_break(unsigned long addr) | |||
682 | static int kgdb_deactivate_sw_breakpoints(void) | 692 | static int kgdb_deactivate_sw_breakpoints(void) |
683 | { | 693 | { |
684 | unsigned long addr; | 694 | unsigned long addr; |
685 | int error = 0; | 695 | int error; |
696 | int ret = 0; | ||
686 | int i; | 697 | int i; |
687 | 698 | ||
688 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { | 699 | for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) { |
@@ -691,13 +702,15 @@ static int kgdb_deactivate_sw_breakpoints(void) | |||
691 | addr = kgdb_break[i].bpt_addr; | 702 | addr = kgdb_break[i].bpt_addr; |
692 | error = kgdb_arch_remove_breakpoint(addr, | 703 | error = kgdb_arch_remove_breakpoint(addr, |
693 | kgdb_break[i].saved_instr); | 704 | kgdb_break[i].saved_instr); |
694 | if (error) | 705 | if (error) { |
695 | return error; | 706 | printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr); |
707 | ret = error; | ||
708 | } | ||
696 | 709 | ||
697 | kgdb_flush_swbreak_addr(addr); | 710 | kgdb_flush_swbreak_addr(addr); |
698 | kgdb_break[i].state = BP_SET; | 711 | kgdb_break[i].state = BP_SET; |
699 | } | 712 | } |
700 | return 0; | 713 | return ret; |
701 | } | 714 | } |
702 | 715 | ||
703 | static int kgdb_remove_sw_break(unsigned long addr) | 716 | static int kgdb_remove_sw_break(unsigned long addr) |
@@ -1204,8 +1217,10 @@ static int gdb_cmd_exception_pass(struct kgdb_state *ks) | |||
1204 | return 1; | 1217 | return 1; |
1205 | 1218 | ||
1206 | } else { | 1219 | } else { |
1207 | error_packet(remcom_out_buffer, -EINVAL); | 1220 | kgdb_msg_write("KGDB only knows signal 9 (pass)" |
1208 | return 0; | 1221 | " and 15 (pass and disconnect)\n" |
1222 | "Executing a continue without signal passing\n", 0); | ||
1223 | remcom_in_buffer[0] = 'c'; | ||
1209 | } | 1224 | } |
1210 | 1225 | ||
1211 | /* Indicate fall through */ | 1226 | /* Indicate fall through */ |
@@ -1395,6 +1410,7 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) | |||
1395 | struct kgdb_state kgdb_var; | 1410 | struct kgdb_state kgdb_var; |
1396 | struct kgdb_state *ks = &kgdb_var; | 1411 | struct kgdb_state *ks = &kgdb_var; |
1397 | unsigned long flags; | 1412 | unsigned long flags; |
1413 | int sstep_tries = 100; | ||
1398 | int error = 0; | 1414 | int error = 0; |
1399 | int i, cpu; | 1415 | int i, cpu; |
1400 | 1416 | ||
@@ -1425,13 +1441,14 @@ acquirelock: | |||
1425 | cpu_relax(); | 1441 | cpu_relax(); |
1426 | 1442 | ||
1427 | /* | 1443 | /* |
1428 | * Do not start the debugger connection on this CPU if the last | 1444 | * For single stepping, try to only enter on the processor |
1429 | * instance of the exception handler wanted to come into the | 1445 | * that was single stepping. To gaurd against a deadlock, the |
1430 | * debugger on a different CPU via a single step | 1446 | * kernel will only try for the value of sstep_tries before |
1447 | * giving up and continuing on. | ||
1431 | */ | 1448 | */ |
1432 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && | 1449 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1 && |
1433 | atomic_read(&kgdb_cpu_doing_single_step) != cpu) { | 1450 | (kgdb_info[cpu].task && |
1434 | 1451 | kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) { | |
1435 | atomic_set(&kgdb_active, -1); | 1452 | atomic_set(&kgdb_active, -1); |
1436 | touch_softlockup_watchdog(); | 1453 | touch_softlockup_watchdog(); |
1437 | clocksource_touch_watchdog(); | 1454 | clocksource_touch_watchdog(); |
@@ -1524,6 +1541,13 @@ acquirelock: | |||
1524 | } | 1541 | } |
1525 | 1542 | ||
1526 | kgdb_restore: | 1543 | kgdb_restore: |
1544 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { | ||
1545 | int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step); | ||
1546 | if (kgdb_info[sstep_cpu].task) | ||
1547 | kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid; | ||
1548 | else | ||
1549 | kgdb_sstep_pid = 0; | ||
1550 | } | ||
1527 | /* Free kgdb_active */ | 1551 | /* Free kgdb_active */ |
1528 | atomic_set(&kgdb_active, -1); | 1552 | atomic_set(&kgdb_active, -1); |
1529 | touch_softlockup_watchdog(); | 1553 | touch_softlockup_watchdog(); |
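The kgdb.c breakpoint loops above switch from returning on the first failure to logging it, remembering the error and carrying on, so one bad address no longer prevents the remaining breakpoints from being installed or removed. The generic shape of that pattern, with a hypothetical per-entry callback standing in for kgdb_arch_set_breakpoint()/kgdb_arch_remove_breakpoint():

/* Apply an operation to every entry; report the last error but never stop early. */
static int apply_all(int n, int (*apply_one)(int idx))
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < n; i++) {
		error = apply_one(i);
		if (error) {
			ret = error;	/* remember that something failed... */
			continue;	/* ...but still process the remaining entries */
		}
	}

	return ret;			/* 0 only if every entry succeeded */
}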
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f5dcd36d3151..4f8df01dbe51 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -168,7 +168,7 @@ static void lock_time_inc(struct lock_time *lt, u64 time) | |||
168 | if (time > lt->max) | 168 | if (time > lt->max) |
169 | lt->max = time; | 169 | lt->max = time; |
170 | 170 | ||
171 | if (time < lt->min || !lt->min) | 171 | if (time < lt->min || !lt->nr) |
172 | lt->min = time; | 172 | lt->min = time; |
173 | 173 | ||
174 | lt->total += time; | 174 | lt->total += time; |
@@ -177,8 +177,15 @@ static void lock_time_inc(struct lock_time *lt, u64 time) | |||
177 | 177 | ||
178 | static inline void lock_time_add(struct lock_time *src, struct lock_time *dst) | 178 | static inline void lock_time_add(struct lock_time *src, struct lock_time *dst) |
179 | { | 179 | { |
180 | dst->min += src->min; | 180 | if (!src->nr) |
181 | dst->max += src->max; | 181 | return; |
182 | |||
183 | if (src->max > dst->max) | ||
184 | dst->max = src->max; | ||
185 | |||
186 | if (src->min < dst->min || !dst->nr) | ||
187 | dst->min = src->min; | ||
188 | |||
182 | dst->total += src->total; | 189 | dst->total += src->total; |
183 | dst->nr += src->nr; | 190 | dst->nr += src->nr; |
184 | } | 191 | } |
@@ -379,7 +386,8 @@ static int save_trace(struct stack_trace *trace) | |||
379 | * complete trace that maxes out the entries provided will be reported | 386 | * complete trace that maxes out the entries provided will be reported |
380 | * as incomplete, friggin useless </rant> | 387 | * as incomplete, friggin useless </rant> |
381 | */ | 388 | */ |
382 | if (trace->entries[trace->nr_entries-1] == ULONG_MAX) | 389 | if (trace->nr_entries != 0 && |
390 | trace->entries[trace->nr_entries-1] == ULONG_MAX) | ||
383 | trace->nr_entries--; | 391 | trace->nr_entries--; |
384 | 392 | ||
385 | trace->max_entries = trace->nr_entries; | 393 | trace->max_entries = trace->nr_entries; |
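The lock_time_add() fix above is a statistics merge: the combined maximum is the larger of the two maxima and the combined minimum the smaller of the two minima, guarded by the sample count so an empty bucket cannot contribute a bogus zero, while totals and counts are simply summed. A standalone sketch of the same merge, with illustrative type and field names:

struct stat_bucket {
	unsigned long long min, max;	/* extremes of the recorded samples */
	unsigned long long total, nr;	/* sum of samples and sample count */
};

static void stat_bucket_merge(const struct stat_bucket *src, struct stat_bucket *dst)
{
	if (!src->nr)				/* nothing recorded in src: leave dst alone */
		return;

	if (src->max > dst->max)
		dst->max = src->max;

	if (src->min < dst->min || !dst->nr)	/* an empty dst has no valid minimum yet */
		dst->min = src->min;

	dst->total += src->total;
	dst->nr += src->nr;
}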
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 40a996ec39fa..e73e53c7582f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -36,7 +36,7 @@ | |||
36 | /* | 36 | /* |
37 | * Each CPU has a list of per CPU events: | 37 | * Each CPU has a list of per CPU events: |
38 | */ | 38 | */ |
39 | DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); | 39 | static DEFINE_PER_CPU(struct perf_cpu_context, perf_cpu_context); |
40 | 40 | ||
41 | int perf_max_events __read_mostly = 1; | 41 | int perf_max_events __read_mostly = 1; |
42 | static int perf_reserved_percpu __read_mostly; | 42 | static int perf_reserved_percpu __read_mostly; |
@@ -567,7 +567,7 @@ static void __perf_event_disable(void *info) | |||
567 | * is the current context on this CPU and preemption is disabled, | 567 | * is the current context on this CPU and preemption is disabled, |
568 | * hence we can't get into perf_event_task_sched_out for this context. | 568 | * hence we can't get into perf_event_task_sched_out for this context. |
569 | */ | 569 | */ |
570 | static void perf_event_disable(struct perf_event *event) | 570 | void perf_event_disable(struct perf_event *event) |
571 | { | 571 | { |
572 | struct perf_event_context *ctx = event->ctx; | 572 | struct perf_event_context *ctx = event->ctx; |
573 | struct task_struct *task = ctx->task; | 573 | struct task_struct *task = ctx->task; |
@@ -971,7 +971,7 @@ static void __perf_event_enable(void *info) | |||
971 | * perf_event_for_each_child or perf_event_for_each as described | 971 | * perf_event_for_each_child or perf_event_for_each as described |
972 | * for perf_event_disable. | 972 | * for perf_event_disable. |
973 | */ | 973 | */ |
974 | static void perf_event_enable(struct perf_event *event) | 974 | void perf_event_enable(struct perf_event *event) |
975 | { | 975 | { |
976 | struct perf_event_context *ctx = event->ctx; | 976 | struct perf_event_context *ctx = event->ctx; |
977 | struct task_struct *task = ctx->task; | 977 | struct task_struct *task = ctx->task; |
@@ -1579,7 +1579,6 @@ static void | |||
1579 | __perf_event_init_context(struct perf_event_context *ctx, | 1579 | __perf_event_init_context(struct perf_event_context *ctx, |
1580 | struct task_struct *task) | 1580 | struct task_struct *task) |
1581 | { | 1581 | { |
1582 | memset(ctx, 0, sizeof(*ctx)); | ||
1583 | spin_lock_init(&ctx->lock); | 1582 | spin_lock_init(&ctx->lock); |
1584 | mutex_init(&ctx->mutex); | 1583 | mutex_init(&ctx->mutex); |
1585 | INIT_LIST_HEAD(&ctx->group_list); | 1584 | INIT_LIST_HEAD(&ctx->group_list); |
@@ -1654,7 +1653,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu) | |||
1654 | } | 1653 | } |
1655 | 1654 | ||
1656 | if (!ctx) { | 1655 | if (!ctx) { |
1657 | ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); | 1656 | ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL); |
1658 | err = -ENOMEM; | 1657 | err = -ENOMEM; |
1659 | if (!ctx) | 1658 | if (!ctx) |
1660 | goto errout; | 1659 | goto errout; |
@@ -4011,6 +4010,7 @@ static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer) | |||
4011 | event->pmu->read(event); | 4010 | event->pmu->read(event); |
4012 | 4011 | ||
4013 | data.addr = 0; | 4012 | data.addr = 0; |
4013 | data.raw = NULL; | ||
4014 | data.period = event->hw.last_period; | 4014 | data.period = event->hw.last_period; |
4015 | regs = get_irq_regs(); | 4015 | regs = get_irq_regs(); |
4016 | /* | 4016 | /* |
@@ -4080,8 +4080,7 @@ static void cpu_clock_perf_event_update(struct perf_event *event) | |||
4080 | u64 now; | 4080 | u64 now; |
4081 | 4081 | ||
4082 | now = cpu_clock(cpu); | 4082 | now = cpu_clock(cpu); |
4083 | prev = atomic64_read(&event->hw.prev_count); | 4083 | prev = atomic64_xchg(&event->hw.prev_count, now); |
4084 | atomic64_set(&event->hw.prev_count, now); | ||
4085 | atomic64_add(now - prev, &event->count); | 4084 | atomic64_add(now - prev, &event->count); |
4086 | } | 4085 | } |
4087 | 4086 | ||
@@ -4286,15 +4285,8 @@ static void bp_perf_event_destroy(struct perf_event *event) | |||
4286 | static const struct pmu *bp_perf_event_init(struct perf_event *bp) | 4285 | static const struct pmu *bp_perf_event_init(struct perf_event *bp) |
4287 | { | 4286 | { |
4288 | int err; | 4287 | int err; |
4289 | /* | 4288 | |
4290 | * The breakpoint is already filled if we haven't created the counter | 4289 | err = register_perf_hw_breakpoint(bp); |
4291 | * through perf syscall | ||
4292 | * FIXME: manage to get trigerred to NULL if it comes from syscalls | ||
4293 | */ | ||
4294 | if (!bp->callback) | ||
4295 | err = register_perf_hw_breakpoint(bp); | ||
4296 | else | ||
4297 | err = __register_perf_hw_breakpoint(bp); | ||
4298 | if (err) | 4290 | if (err) |
4299 | return ERR_PTR(err); | 4291 | return ERR_PTR(err); |
4300 | 4292 | ||
@@ -4308,6 +4300,7 @@ void perf_bp_event(struct perf_event *bp, void *data) | |||
4308 | struct perf_sample_data sample; | 4300 | struct perf_sample_data sample; |
4309 | struct pt_regs *regs = data; | 4301 | struct pt_regs *regs = data; |
4310 | 4302 | ||
4303 | sample.raw = NULL; | ||
4311 | sample.addr = bp->attr.bp_addr; | 4304 | sample.addr = bp->attr.bp_addr; |
4312 | 4305 | ||
4313 | if (!perf_exclude_event(bp, regs)) | 4306 | if (!perf_exclude_event(bp, regs)) |
@@ -4390,7 +4383,7 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
4390 | struct perf_event_context *ctx, | 4383 | struct perf_event_context *ctx, |
4391 | struct perf_event *group_leader, | 4384 | struct perf_event *group_leader, |
4392 | struct perf_event *parent_event, | 4385 | struct perf_event *parent_event, |
4393 | perf_callback_t callback, | 4386 | perf_overflow_handler_t overflow_handler, |
4394 | gfp_t gfpflags) | 4387 | gfp_t gfpflags) |
4395 | { | 4388 | { |
4396 | const struct pmu *pmu; | 4389 | const struct pmu *pmu; |
@@ -4433,10 +4426,10 @@ perf_event_alloc(struct perf_event_attr *attr, | |||
4433 | 4426 | ||
4434 | event->state = PERF_EVENT_STATE_INACTIVE; | 4427 | event->state = PERF_EVENT_STATE_INACTIVE; |
4435 | 4428 | ||
4436 | if (!callback && parent_event) | 4429 | if (!overflow_handler && parent_event) |
4437 | callback = parent_event->callback; | 4430 | overflow_handler = parent_event->overflow_handler; |
4438 | 4431 | ||
4439 | event->callback = callback; | 4432 | event->overflow_handler = overflow_handler; |
4440 | 4433 | ||
4441 | if (attr->disabled) | 4434 | if (attr->disabled) |
4442 | event->state = PERF_EVENT_STATE_OFF; | 4435 | event->state = PERF_EVENT_STATE_OFF; |
@@ -4776,7 +4769,8 @@ err_put_context: | |||
4776 | */ | 4769 | */ |
4777 | struct perf_event * | 4770 | struct perf_event * |
4778 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | 4771 | perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, |
4779 | pid_t pid, perf_callback_t callback) | 4772 | pid_t pid, |
4773 | perf_overflow_handler_t overflow_handler) | ||
4780 | { | 4774 | { |
4781 | struct perf_event *event; | 4775 | struct perf_event *event; |
4782 | struct perf_event_context *ctx; | 4776 | struct perf_event_context *ctx; |
@@ -4793,7 +4787,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, | |||
4793 | } | 4787 | } |
4794 | 4788 | ||
4795 | event = perf_event_alloc(attr, cpu, ctx, NULL, | 4789 | event = perf_event_alloc(attr, cpu, ctx, NULL, |
4796 | NULL, callback, GFP_KERNEL); | 4790 | NULL, overflow_handler, GFP_KERNEL); |
4797 | if (IS_ERR(event)) { | 4791 | if (IS_ERR(event)) { |
4798 | err = PTR_ERR(event); | 4792 | err = PTR_ERR(event); |
4799 | goto err_put_context; | 4793 | goto err_put_context; |
@@ -5090,7 +5084,7 @@ again: | |||
5090 | */ | 5084 | */ |
5091 | int perf_event_init_task(struct task_struct *child) | 5085 | int perf_event_init_task(struct task_struct *child) |
5092 | { | 5086 | { |
5093 | struct perf_event_context *child_ctx, *parent_ctx; | 5087 | struct perf_event_context *child_ctx = NULL, *parent_ctx; |
5094 | struct perf_event_context *cloned_ctx; | 5088 | struct perf_event_context *cloned_ctx; |
5095 | struct perf_event *event; | 5089 | struct perf_event *event; |
5096 | struct task_struct *parent = current; | 5090 | struct task_struct *parent = current; |
@@ -5106,20 +5100,6 @@ int perf_event_init_task(struct task_struct *child) | |||
5106 | return 0; | 5100 | return 0; |
5107 | 5101 | ||
5108 | /* | 5102 | /* |
5109 | * This is executed from the parent task context, so inherit | ||
5110 | * events that have been marked for cloning. | ||
5111 | * First allocate and initialize a context for the child. | ||
5112 | */ | ||
5113 | |||
5114 | child_ctx = kmalloc(sizeof(struct perf_event_context), GFP_KERNEL); | ||
5115 | if (!child_ctx) | ||
5116 | return -ENOMEM; | ||
5117 | |||
5118 | __perf_event_init_context(child_ctx, child); | ||
5119 | child->perf_event_ctxp = child_ctx; | ||
5120 | get_task_struct(child); | ||
5121 | |||
5122 | /* | ||
5123 | * If the parent's context is a clone, pin it so it won't get | 5103 | * If the parent's context is a clone, pin it so it won't get |
5124 | * swapped under us. | 5104 | * swapped under us. |
5125 | */ | 5105 | */ |
@@ -5149,6 +5129,26 @@ int perf_event_init_task(struct task_struct *child) | |||
5149 | continue; | 5129 | continue; |
5150 | } | 5130 | } |
5151 | 5131 | ||
5132 | if (!child->perf_event_ctxp) { | ||
5133 | /* | ||
5134 | * This is executed from the parent task context, so | ||
5135 | * inherit events that have been marked for cloning. | ||
5136 | * First allocate and initialize a context for the | ||
5137 | * child. | ||
5138 | */ | ||
5139 | |||
5140 | child_ctx = kzalloc(sizeof(struct perf_event_context), | ||
5141 | GFP_KERNEL); | ||
5142 | if (!child_ctx) { | ||
5143 | ret = -ENOMEM; | ||
5144 | goto exit; | ||
5145 | } | ||
5146 | |||
5147 | __perf_event_init_context(child_ctx, child); | ||
5148 | child->perf_event_ctxp = child_ctx; | ||
5149 | get_task_struct(child); | ||
5150 | } | ||
5151 | |||
5152 | ret = inherit_group(event, parent, parent_ctx, | 5152 | ret = inherit_group(event, parent, parent_ctx, |
5153 | child, child_ctx); | 5153 | child, child_ctx); |
5154 | if (ret) { | 5154 | if (ret) { |
@@ -5177,6 +5177,7 @@ int perf_event_init_task(struct task_struct *child) | |||
5177 | get_ctx(child_ctx->parent_ctx); | 5177 | get_ctx(child_ctx->parent_ctx); |
5178 | } | 5178 | } |
5179 | 5179 | ||
5180 | exit: | ||
5180 | mutex_unlock(&parent_ctx->mutex); | 5181 | mutex_unlock(&parent_ctx->mutex); |
5181 | 5182 | ||
5182 | perf_unpin_context(parent_ctx); | 5183 | perf_unpin_context(parent_ctx); |
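Among the perf_event.c changes above, cpu_clock_perf_event_update() replaces a separate read and store of hw.prev_count with a single atomic64_xchg(), so the previous timestamp is fetched and replaced in one atomic step before the delta is accumulated. A sketch of that read-and-replace pattern, assuming the kernel's atomic64_* helpers; the function and parameter names are illustrative:

#include <asm/atomic.h>		/* atomic64_t and the atomic64_* helpers in this kernel era */
#include <linux/types.h>

static void clock_counter_update(atomic64_t *prev_count, atomic64_t *count, u64 now)
{
	u64 prev;

	prev = atomic64_xchg(prev_count, now);	/* fetch the old timestamp, store the new one */
	atomic64_add(now - prev, count);	/* accumulate the elapsed delta */
}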
diff --git a/kernel/resource.c b/kernel/resource.c
index fb11a58b9594..dc15686b7a77 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -308,35 +308,37 @@ static int find_resource(struct resource *root, struct resource *new, | |||
308 | void *alignf_data) | 308 | void *alignf_data) |
309 | { | 309 | { |
310 | struct resource *this = root->child; | 310 | struct resource *this = root->child; |
311 | resource_size_t start, end; | ||
311 | 312 | ||
312 | new->start = root->start; | 313 | start = root->start; |
313 | /* | 314 | /* |
314 | * Skip past an allocated resource that starts at 0, since the assignment | 315 | * Skip past an allocated resource that starts at 0, since the assignment |
315 | * of this->start - 1 to new->end below would cause an underflow. | 316 | * of this->start - 1 to new->end below would cause an underflow. |
316 | */ | 317 | */ |
317 | if (this && this->start == 0) { | 318 | if (this && this->start == 0) { |
318 | new->start = this->end + 1; | 319 | start = this->end + 1; |
319 | this = this->sibling; | 320 | this = this->sibling; |
320 | } | 321 | } |
321 | for(;;) { | 322 | for(;;) { |
322 | if (this) | 323 | if (this) |
323 | new->end = this->start - 1; | 324 | end = this->start - 1; |
324 | else | 325 | else |
325 | new->end = root->end; | 326 | end = root->end; |
326 | if (new->start < min) | 327 | if (start < min) |
327 | new->start = min; | 328 | start = min; |
328 | if (new->end > max) | 329 | if (end > max) |
329 | new->end = max; | 330 | end = max; |
330 | new->start = ALIGN(new->start, align); | 331 | start = ALIGN(start, align); |
331 | if (alignf) | 332 | if (alignf) |
332 | alignf(alignf_data, new, size, align); | 333 | alignf(alignf_data, new, size, align); |
333 | if (new->start < new->end && new->end - new->start >= size - 1) { | 334 | if (start < end && end - start >= size - 1) { |
334 | new->end = new->start + size - 1; | 335 | new->start = start; |
336 | new->end = start + size - 1; | ||
335 | return 0; | 337 | return 0; |
336 | } | 338 | } |
337 | if (!this) | 339 | if (!this) |
338 | break; | 340 | break; |
339 | new->start = this->end + 1; | 341 | start = this->end + 1; |
340 | this = this->sibling; | 342 | this = this->sibling; |
341 | } | 343 | } |
342 | return -EBUSY; | 344 | return -EBUSY; |
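The find_resource() change above computes the candidate window in local start/end variables and only writes new->start and new->end once a gap that fits has been found, so a failed search no longer corrupts the caller's resource. The bare shape of that compute-locally, commit-on-success pattern; types and names here are simplified placeholders:

/*
 * Try to place a block of the given size inside [min, max]; write the result
 * to *out_start/*out_end only on success.
 */
static int place_block(unsigned long *out_start, unsigned long *out_end,
		       unsigned long min, unsigned long max, unsigned long size)
{
	unsigned long start = min;
	unsigned long end = max;

	/* ...walk the candidate gaps, clamping and aligning start/end locally... */

	if (start < end && end - start >= size - 1) {
		*out_start = start;		/* commit only when the window fits */
		*out_end = start + size - 1;
		return 0;
	}

	return -1;				/* failure: the caller's values stay untouched */
}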
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 874f2893cff0..88bd9ae2a9ed 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1361,11 +1361,7 @@ int trace_array_vprintk(struct trace_array *tr, | |||
1361 | pause_graph_tracing(); | 1361 | pause_graph_tracing(); |
1362 | raw_local_irq_save(irq_flags); | 1362 | raw_local_irq_save(irq_flags); |
1363 | __raw_spin_lock(&trace_buf_lock); | 1363 | __raw_spin_lock(&trace_buf_lock); |
1364 | if (args == NULL) { | 1364 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
1365 | strncpy(trace_buf, fmt, TRACE_BUF_SIZE); | ||
1366 | len = strlen(trace_buf); | ||
1367 | } else | ||
1368 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | ||
1369 | 1365 | ||
1370 | size = sizeof(*entry) + len + 1; | 1366 | size = sizeof(*entry) + len + 1; |
1371 | buffer = tr->buffer; | 1367 | buffer = tr->buffer; |
@@ -1516,6 +1512,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1516 | int i = (int)*pos; | 1512 | int i = (int)*pos; |
1517 | void *ent; | 1513 | void *ent; |
1518 | 1514 | ||
1515 | WARN_ON_ONCE(iter->leftover); | ||
1516 | |||
1519 | (*pos)++; | 1517 | (*pos)++; |
1520 | 1518 | ||
1521 | /* can't go backwards */ | 1519 | /* can't go backwards */ |
@@ -1614,8 +1612,16 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1614 | ; | 1612 | ; |
1615 | 1613 | ||
1616 | } else { | 1614 | } else { |
1617 | l = *pos - 1; | 1615 | /* |
1618 | p = s_next(m, p, &l); | 1616 | * If we overflowed the seq_file before, then we want |
1617 | * to just reuse the trace_seq buffer again. | ||
1618 | */ | ||
1619 | if (iter->leftover) | ||
1620 | p = iter; | ||
1621 | else { | ||
1622 | l = *pos - 1; | ||
1623 | p = s_next(m, p, &l); | ||
1624 | } | ||
1619 | } | 1625 | } |
1620 | 1626 | ||
1621 | trace_event_read_lock(); | 1627 | trace_event_read_lock(); |
@@ -1923,6 +1929,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
1923 | static int s_show(struct seq_file *m, void *v) | 1929 | static int s_show(struct seq_file *m, void *v) |
1924 | { | 1930 | { |
1925 | struct trace_iterator *iter = v; | 1931 | struct trace_iterator *iter = v; |
1932 | int ret; | ||
1926 | 1933 | ||
1927 | if (iter->ent == NULL) { | 1934 | if (iter->ent == NULL) { |
1928 | if (iter->tr) { | 1935 | if (iter->tr) { |
@@ -1942,9 +1949,27 @@ static int s_show(struct seq_file *m, void *v) | |||
1942 | if (!(trace_flags & TRACE_ITER_VERBOSE)) | 1949 | if (!(trace_flags & TRACE_ITER_VERBOSE)) |
1943 | print_func_help_header(m); | 1950 | print_func_help_header(m); |
1944 | } | 1951 | } |
1952 | } else if (iter->leftover) { | ||
1953 | /* | ||
1954 | * If we filled the seq_file buffer earlier, we | ||
1955 | * want to just show it now. | ||
1956 | */ | ||
1957 | ret = trace_print_seq(m, &iter->seq); | ||
1958 | |||
1959 | /* ret should this time be zero, but you never know */ | ||
1960 | iter->leftover = ret; | ||
1961 | |||
1945 | } else { | 1962 | } else { |
1946 | print_trace_line(iter); | 1963 | print_trace_line(iter); |
1947 | trace_print_seq(m, &iter->seq); | 1964 | ret = trace_print_seq(m, &iter->seq); |
1965 | /* | ||
1966 | * If we overflow the seq_file buffer, then it will | ||
1967 | * ask us for this data again at start up. | ||
1968 | * Use that instead. | ||
1969 | * ret is 0 if seq_file write succeeded. | ||
1970 | * -1 otherwise. | ||
1971 | */ | ||
1972 | iter->leftover = ret; | ||
1948 | } | 1973 | } |
1949 | 1974 | ||
1950 | return 0; | 1975 | return 0; |
@@ -2898,6 +2923,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file) | |||
2898 | else | 2923 | else |
2899 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | 2924 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); |
2900 | 2925 | ||
2926 | |||
2927 | if (iter->trace->pipe_close) | ||
2928 | iter->trace->pipe_close(iter); | ||
2929 | |||
2901 | mutex_unlock(&trace_types_lock); | 2930 | mutex_unlock(&trace_types_lock); |
2902 | 2931 | ||
2903 | free_cpumask_var(iter->started); | 2932 | free_cpumask_var(iter->started); |
@@ -3320,6 +3349,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf, | |||
3320 | return cnt; | 3349 | return cnt; |
3321 | } | 3350 | } |
3322 | 3351 | ||
3352 | static int mark_printk(const char *fmt, ...) | ||
3353 | { | ||
3354 | int ret; | ||
3355 | va_list args; | ||
3356 | va_start(args, fmt); | ||
3357 | ret = trace_vprintk(0, fmt, args); | ||
3358 | va_end(args); | ||
3359 | return ret; | ||
3360 | } | ||
3361 | |||
3323 | static ssize_t | 3362 | static ssize_t |
3324 | tracing_mark_write(struct file *filp, const char __user *ubuf, | 3363 | tracing_mark_write(struct file *filp, const char __user *ubuf, |
3325 | size_t cnt, loff_t *fpos) | 3364 | size_t cnt, loff_t *fpos) |
@@ -3346,7 +3385,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, | |||
3346 | } else | 3385 | } else |
3347 | buf[cnt] = '\0'; | 3386 | buf[cnt] = '\0'; |
3348 | 3387 | ||
3349 | cnt = trace_vprintk(0, buf, NULL); | 3388 | cnt = mark_printk("%s", buf); |
3350 | kfree(buf); | 3389 | kfree(buf); |
3351 | *fpos += cnt; | 3390 | *fpos += cnt; |
3352 | 3391 | ||
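The tracing_mark_write() change above stops handing the user-supplied buffer to trace_vprintk() as a format string with a NULL va_list; instead it goes through the new mark_printk() wrapper as the argument of a fixed "%s" format. A userspace-flavoured sketch of such a varargs wrapper, with vprintf() standing in for trace_vprintk():

#include <stdarg.h>
#include <stdio.h>

/* Forward a printf-style call to a vprintf-style backend. */
static int emit(const char *fmt, ...)
{
	va_list args;
	int ret;

	va_start(args, fmt);
	ret = vprintf(fmt, args);	/* stand-in for trace_vprintk(0, fmt, args) */
	va_end(args);

	return ret;
}

/* Untrusted text is passed as data, never as the format string: emit("%s", user_buf); */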
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1d7f4830a80d..7fa33cab6962 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -272,6 +272,7 @@ struct tracer_flags { | |||
272 | * @pipe_open: called when the trace_pipe file is opened | 272 | * @pipe_open: called when the trace_pipe file is opened |
273 | * @wait_pipe: override how the user waits for traces on trace_pipe | 273 | * @wait_pipe: override how the user waits for traces on trace_pipe |
274 | * @close: called when the trace file is released | 274 | * @close: called when the trace file is released |
275 | * @pipe_close: called when the trace_pipe file is released | ||
275 | * @read: override the default read callback on trace_pipe | 276 | * @read: override the default read callback on trace_pipe |
276 | * @splice_read: override the default splice_read callback on trace_pipe | 277 | * @splice_read: override the default splice_read callback on trace_pipe |
277 | * @selftest: selftest to run on boot (see trace_selftest.c) | 278 | * @selftest: selftest to run on boot (see trace_selftest.c) |
@@ -290,6 +291,7 @@ struct tracer { | |||
290 | void (*pipe_open)(struct trace_iterator *iter); | 291 | void (*pipe_open)(struct trace_iterator *iter); |
291 | void (*wait_pipe)(struct trace_iterator *iter); | 292 | void (*wait_pipe)(struct trace_iterator *iter); |
292 | void (*close)(struct trace_iterator *iter); | 293 | void (*close)(struct trace_iterator *iter); |
294 | void (*pipe_close)(struct trace_iterator *iter); | ||
293 | ssize_t (*read)(struct trace_iterator *iter, | 295 | ssize_t (*read)(struct trace_iterator *iter, |
294 | struct file *filp, char __user *ubuf, | 296 | struct file *filp, char __user *ubuf, |
295 | size_t cnt, loff_t *ppos); | 297 | size_t cnt, loff_t *ppos); |
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 45e6c01b2e4d..a43d009c561a 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -14,9 +14,20 @@ | |||
14 | #include "trace.h" | 14 | #include "trace.h" |
15 | #include "trace_output.h" | 15 | #include "trace_output.h" |
16 | 16 | ||
17 | struct fgraph_data { | 17 | struct fgraph_cpu_data { |
18 | pid_t last_pid; | 18 | pid_t last_pid; |
19 | int depth; | 19 | int depth; |
20 | int ignore; | ||
21 | }; | ||
22 | |||
23 | struct fgraph_data { | ||
24 | struct fgraph_cpu_data *cpu_data; | ||
25 | |||
26 | /* Place to preserve last processed entry. */ | ||
27 | struct ftrace_graph_ent_entry ent; | ||
28 | struct ftrace_graph_ret_entry ret; | ||
29 | int failed; | ||
30 | int cpu; | ||
20 | }; | 31 | }; |
21 | 32 | ||
22 | #define TRACE_GRAPH_INDENT 2 | 33 | #define TRACE_GRAPH_INDENT 2 |
@@ -384,7 +395,7 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
384 | if (!data) | 395 | if (!data) |
385 | return TRACE_TYPE_HANDLED; | 396 | return TRACE_TYPE_HANDLED; |
386 | 397 | ||
387 | last_pid = &(per_cpu_ptr(data, cpu)->last_pid); | 398 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
388 | 399 | ||
389 | if (*last_pid == pid) | 400 | if (*last_pid == pid) |
390 | return TRACE_TYPE_HANDLED; | 401 | return TRACE_TYPE_HANDLED; |
@@ -435,26 +446,49 @@ static struct ftrace_graph_ret_entry * | |||
435 | get_return_for_leaf(struct trace_iterator *iter, | 446 | get_return_for_leaf(struct trace_iterator *iter, |
436 | struct ftrace_graph_ent_entry *curr) | 447 | struct ftrace_graph_ent_entry *curr) |
437 | { | 448 | { |
438 | struct ring_buffer_iter *ring_iter; | 449 | struct fgraph_data *data = iter->private; |
450 | struct ring_buffer_iter *ring_iter = NULL; | ||
439 | struct ring_buffer_event *event; | 451 | struct ring_buffer_event *event; |
440 | struct ftrace_graph_ret_entry *next; | 452 | struct ftrace_graph_ret_entry *next; |
441 | 453 | ||
442 | ring_iter = iter->buffer_iter[iter->cpu]; | 454 | /* |
455 | * If the previous output failed to write to the seq buffer, | ||
456 | * then we just reuse the data from before. | ||
457 | */ | ||
458 | if (data && data->failed) { | ||
459 | curr = &data->ent; | ||
460 | next = &data->ret; | ||
461 | } else { | ||
443 | 462 | ||
444 | /* First peek to compare current entry and the next one */ | 463 | ring_iter = iter->buffer_iter[iter->cpu]; |
445 | if (ring_iter) | 464 | |
446 | event = ring_buffer_iter_peek(ring_iter, NULL); | 465 | /* First peek to compare current entry and the next one */ |
447 | else { | 466 | if (ring_iter) |
448 | /* We need to consume the current entry to see the next one */ | 467 | event = ring_buffer_iter_peek(ring_iter, NULL); |
449 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | 468 | else { |
450 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | 469 | /* |
451 | NULL); | 470 | * We need to consume the current entry to see |
452 | } | 471 | * the next one. |
472 | */ | ||
473 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | ||
474 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | ||
475 | NULL); | ||
476 | } | ||
453 | 477 | ||
454 | if (!event) | 478 | if (!event) |
455 | return NULL; | 479 | return NULL; |
480 | |||
481 | next = ring_buffer_event_data(event); | ||
456 | 482 | ||
457 | next = ring_buffer_event_data(event); | 483 | if (data) { |
484 | /* | ||
485 | * Save current and next entries for later reference | ||
486 | * if the output fails. | ||
487 | */ | ||
488 | data->ent = *curr; | ||
489 | data->ret = *next; | ||
490 | } | ||
491 | } | ||
458 | 492 | ||
459 | if (next->ent.type != TRACE_GRAPH_RET) | 493 | if (next->ent.type != TRACE_GRAPH_RET) |
460 | return NULL; | 494 | return NULL; |
@@ -640,7 +674,7 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
640 | 674 | ||
641 | if (data) { | 675 | if (data) { |
642 | int cpu = iter->cpu; | 676 | int cpu = iter->cpu; |
643 | int *depth = &(per_cpu_ptr(data, cpu)->depth); | 677 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
644 | 678 | ||
645 | /* | 679 | /* |
646 | * Comments display at + 1 to depth. Since | 680 | * Comments display at + 1 to depth. Since |
@@ -688,7 +722,7 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
688 | 722 | ||
689 | if (data) { | 723 | if (data) { |
690 | int cpu = iter->cpu; | 724 | int cpu = iter->cpu; |
691 | int *depth = &(per_cpu_ptr(data, cpu)->depth); | 725 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
692 | 726 | ||
693 | *depth = call->depth; | 727 | *depth = call->depth; |
694 | } | 728 | } |
@@ -782,19 +816,34 @@ static enum print_line_t | |||
782 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 816 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
783 | struct trace_iterator *iter) | 817 | struct trace_iterator *iter) |
784 | { | 818 | { |
785 | int cpu = iter->cpu; | 819 | struct fgraph_data *data = iter->private; |
786 | struct ftrace_graph_ent *call = &field->graph_ent; | 820 | struct ftrace_graph_ent *call = &field->graph_ent; |
787 | struct ftrace_graph_ret_entry *leaf_ret; | 821 | struct ftrace_graph_ret_entry *leaf_ret; |
822 | static enum print_line_t ret; | ||
823 | int cpu = iter->cpu; | ||
788 | 824 | ||
789 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) | 825 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func)) |
790 | return TRACE_TYPE_PARTIAL_LINE; | 826 | return TRACE_TYPE_PARTIAL_LINE; |
791 | 827 | ||
792 | leaf_ret = get_return_for_leaf(iter, field); | 828 | leaf_ret = get_return_for_leaf(iter, field); |
793 | if (leaf_ret) | 829 | if (leaf_ret) |
794 | return print_graph_entry_leaf(iter, field, leaf_ret, s); | 830 | ret = print_graph_entry_leaf(iter, field, leaf_ret, s); |
795 | else | 831 | else |
796 | return print_graph_entry_nested(iter, field, s, cpu); | 832 | ret = print_graph_entry_nested(iter, field, s, cpu); |
797 | 833 | ||
834 | if (data) { | ||
835 | /* | ||
836 | * If we failed to write our output, then we need to make | ||
837 | * note of it. Because we already consumed our entry. | ||
838 | */ | ||
839 | if (s->full) { | ||
840 | data->failed = 1; | ||
841 | data->cpu = cpu; | ||
842 | } else | ||
843 | data->failed = 0; | ||
844 | } | ||
845 | |||
846 | return ret; | ||
798 | } | 847 | } |
799 | 848 | ||
800 | static enum print_line_t | 849 | static enum print_line_t |
@@ -810,7 +859,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
810 | 859 | ||
811 | if (data) { | 860 | if (data) { |
812 | int cpu = iter->cpu; | 861 | int cpu = iter->cpu; |
813 | int *depth = &(per_cpu_ptr(data, cpu)->depth); | 862 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); |
814 | 863 | ||
815 | /* | 864 | /* |
816 | * Comments display at + 1 to depth. This is the | 865 | * Comments display at + 1 to depth. This is the |
@@ -873,7 +922,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
873 | int i; | 922 | int i; |
874 | 923 | ||
875 | if (data) | 924 | if (data) |
876 | depth = per_cpu_ptr(data, iter->cpu)->depth; | 925 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
877 | 926 | ||
878 | if (print_graph_prologue(iter, s, 0, 0)) | 927 | if (print_graph_prologue(iter, s, 0, 0)) |
879 | return TRACE_TYPE_PARTIAL_LINE; | 928 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -941,8 +990,33 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
941 | enum print_line_t | 990 | enum print_line_t |
942 | print_graph_function(struct trace_iterator *iter) | 991 | print_graph_function(struct trace_iterator *iter) |
943 | { | 992 | { |
993 | struct ftrace_graph_ent_entry *field; | ||
994 | struct fgraph_data *data = iter->private; | ||
944 | struct trace_entry *entry = iter->ent; | 995 | struct trace_entry *entry = iter->ent; |
945 | struct trace_seq *s = &iter->seq; | 996 | struct trace_seq *s = &iter->seq; |
997 | int cpu = iter->cpu; | ||
998 | int ret; | ||
999 | |||
1000 | if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) { | ||
1001 | per_cpu_ptr(data->cpu_data, cpu)->ignore = 0; | ||
1002 | return TRACE_TYPE_HANDLED; | ||
1003 | } | ||
1004 | |||
1005 | /* | ||
1006 | * If the last output failed, there's a possibility we need | ||
1007 | * to print out the missing entry which would never go out. | ||
1008 | */ | ||
1009 | if (data && data->failed) { | ||
1010 | field = &data->ent; | ||
1011 | iter->cpu = data->cpu; | ||
1012 | ret = print_graph_entry(field, s, iter); | ||
1013 | if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) { | ||
1014 | per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1; | ||
1015 | ret = TRACE_TYPE_NO_CONSUME; | ||
1016 | } | ||
1017 | iter->cpu = cpu; | ||
1018 | return ret; | ||
1019 | } | ||
946 | 1020 | ||
947 | switch (entry->type) { | 1021 | switch (entry->type) { |
948 | case TRACE_GRAPH_ENT: { | 1022 | case TRACE_GRAPH_ENT: { |
@@ -952,7 +1026,7 @@ print_graph_function(struct trace_iterator *iter) | |||
952 | * sizeof(struct ftrace_graph_ent_entry) is very small, | 1026 | * sizeof(struct ftrace_graph_ent_entry) is very small, |
953 | * it can be safely saved at the stack. | 1027 | * it can be safely saved at the stack. |
954 | */ | 1028 | */ |
955 | struct ftrace_graph_ent_entry *field, saved; | 1029 | struct ftrace_graph_ent_entry saved; |
956 | trace_assign_type(field, entry); | 1030 | trace_assign_type(field, entry); |
957 | saved = *field; | 1031 | saved = *field; |
958 | return print_graph_entry(&saved, s, iter); | 1032 | return print_graph_entry(&saved, s, iter); |
@@ -1030,31 +1104,54 @@ static void print_graph_headers(struct seq_file *s) | |||
1030 | static void graph_trace_open(struct trace_iterator *iter) | 1104 | static void graph_trace_open(struct trace_iterator *iter) |
1031 | { | 1105 | { |
1032 | /* pid and depth on the last trace processed */ | 1106 | /* pid and depth on the last trace processed */ |
1033 | struct fgraph_data *data = alloc_percpu(struct fgraph_data); | 1107 | struct fgraph_data *data; |
1034 | int cpu; | 1108 | int cpu; |
1035 | 1109 | ||
1110 | iter->private = NULL; | ||
1111 | |||
1112 | data = kzalloc(sizeof(*data), GFP_KERNEL); | ||
1036 | if (!data) | 1113 | if (!data) |
1037 | pr_warning("function graph tracer: not enough memory\n"); | 1114 | goto out_err; |
1038 | else | 1115 | |
1039 | for_each_possible_cpu(cpu) { | 1116 | data->cpu_data = alloc_percpu(struct fgraph_cpu_data); |
1040 | pid_t *pid = &(per_cpu_ptr(data, cpu)->last_pid); | 1117 | if (!data->cpu_data) |
1041 | int *depth = &(per_cpu_ptr(data, cpu)->depth); | 1118 | goto out_err_free; |
1042 | *pid = -1; | 1119 | |
1043 | *depth = 0; | 1120 | for_each_possible_cpu(cpu) { |
1044 | } | 1121 | pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
1122 | int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth); | ||
1123 | int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore); | ||
1124 | *pid = -1; | ||
1125 | *depth = 0; | ||
1126 | *ignore = 0; | ||
1127 | } | ||
1045 | 1128 | ||
1046 | iter->private = data; | 1129 | iter->private = data; |
1130 | |||
1131 | return; | ||
1132 | |||
1133 | out_err_free: | ||
1134 | kfree(data); | ||
1135 | out_err: | ||
1136 | pr_warning("function graph tracer: not enough memory\n"); | ||
1047 | } | 1137 | } |
1048 | 1138 | ||
1049 | static void graph_trace_close(struct trace_iterator *iter) | 1139 | static void graph_trace_close(struct trace_iterator *iter) |
1050 | { | 1140 | { |
1051 | free_percpu(iter->private); | 1141 | struct fgraph_data *data = iter->private; |
1142 | |||
1143 | if (data) { | ||
1144 | free_percpu(data->cpu_data); | ||
1145 | kfree(data); | ||
1146 | } | ||
1052 | } | 1147 | } |
1053 | 1148 | ||
1054 | static struct tracer graph_trace __read_mostly = { | 1149 | static struct tracer graph_trace __read_mostly = { |
1055 | .name = "function_graph", | 1150 | .name = "function_graph", |
1056 | .open = graph_trace_open, | 1151 | .open = graph_trace_open, |
1152 | .pipe_open = graph_trace_open, | ||
1057 | .close = graph_trace_close, | 1153 | .close = graph_trace_close, |
1154 | .pipe_close = graph_trace_close, | ||
1058 | .wait_pipe = poll_wait_pipe, | 1155 | .wait_pipe = poll_wait_pipe, |
1059 | .init = graph_trace_init, | 1156 | .init = graph_trace_init, |
1060 | .reset = graph_trace_reset, | 1157 | .reset = graph_trace_reset, |
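The graph_trace_open()/graph_trace_close() hunks above replace the old flat per-CPU allocation with a two-level layout: a kzalloc'd fgraph_data that owns an alloc_percpu'd array of per-CPU state (last_pid, depth, and the new ignore flag), with error unwinding and matching pipe_open/pipe_close hooks so trace_pipe readers get the same private data. A minimal sketch of the layout those hunks rely on; the field names come from the hunks themselves, everything else is illustrative:

    #include <linux/types.h>

    /* Sketch only: the per-CPU bookkeeping assumed by graph_trace_open() above. */
    struct fgraph_cpu_data {
            pid_t   last_pid;       /* pid of the last entry printed on this cpu */
            int     depth;          /* current call depth */
            int     ignore;         /* skip printing the next entry on this cpu */
    };

    struct fgraph_data {
            struct fgraph_cpu_data *cpu_data;  /* allocated with alloc_percpu() */
            /* further iterator-private state may follow */
    };
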
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index aff5f80b59b8..b52d397e57eb 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -606,23 +606,22 @@ static int create_trace_probe(int argc, char **argv) | |||
606 | */ | 606 | */ |
607 | struct trace_probe *tp; | 607 | struct trace_probe *tp; |
608 | int i, ret = 0; | 608 | int i, ret = 0; |
609 | int is_return = 0; | 609 | int is_return = 0, is_delete = 0; |
610 | char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL; | 610 | char *symbol = NULL, *event = NULL, *arg = NULL, *group = NULL; |
611 | unsigned long offset = 0; | 611 | unsigned long offset = 0; |
612 | void *addr = NULL; | 612 | void *addr = NULL; |
613 | char buf[MAX_EVENT_NAME_LEN]; | 613 | char buf[MAX_EVENT_NAME_LEN]; |
614 | 614 | ||
615 | if (argc < 2) { | 615 | /* argc must be >= 1 */ |
616 | pr_info("Probe point is not specified.\n"); | ||
617 | return -EINVAL; | ||
618 | } | ||
619 | |||
620 | if (argv[0][0] == 'p') | 616 | if (argv[0][0] == 'p') |
621 | is_return = 0; | 617 | is_return = 0; |
622 | else if (argv[0][0] == 'r') | 618 | else if (argv[0][0] == 'r') |
623 | is_return = 1; | 619 | is_return = 1; |
620 | else if (argv[0][0] == '-') | ||
621 | is_delete = 1; | ||
624 | else { | 622 | else { |
625 | pr_info("Probe definition must be started with 'p' or 'r'.\n"); | 623 | pr_info("Probe definition must be started with 'p', 'r' or" |
624 | " '-'.\n"); | ||
626 | return -EINVAL; | 625 | return -EINVAL; |
627 | } | 626 | } |
628 | 627 | ||
@@ -642,7 +641,29 @@ static int create_trace_probe(int argc, char **argv) | |||
642 | return -EINVAL; | 641 | return -EINVAL; |
643 | } | 642 | } |
644 | } | 643 | } |
644 | if (!group) | ||
645 | group = KPROBE_EVENT_SYSTEM; | ||
645 | 646 | ||
647 | if (is_delete) { | ||
648 | if (!event) { | ||
649 | pr_info("Delete command needs an event name.\n"); | ||
650 | return -EINVAL; | ||
651 | } | ||
652 | tp = find_probe_event(event, group); | ||
653 | if (!tp) { | ||
654 | pr_info("Event %s/%s doesn't exist.\n", group, event); | ||
655 | return -ENOENT; | ||
656 | } | ||
657 | /* delete an event */ | ||
658 | unregister_trace_probe(tp); | ||
659 | free_trace_probe(tp); | ||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | if (argc < 2) { | ||
664 | pr_info("Probe point is not specified.\n"); | ||
665 | return -EINVAL; | ||
666 | } | ||
646 | if (isdigit(argv[1][0])) { | 667 | if (isdigit(argv[1][0])) { |
647 | if (is_return) { | 668 | if (is_return) { |
648 | pr_info("Return probe point must be a symbol.\n"); | 669 | pr_info("Return probe point must be a symbol.\n"); |
@@ -671,8 +692,6 @@ static int create_trace_probe(int argc, char **argv) | |||
671 | argc -= 2; argv += 2; | 692 | argc -= 2; argv += 2; |
672 | 693 | ||
673 | /* setup a probe */ | 694 | /* setup a probe */ |
674 | if (!group) | ||
675 | group = KPROBE_EVENT_SYSTEM; | ||
676 | if (!event) { | 695 | if (!event) { |
677 | /* Make a new event name */ | 696 | /* Make a new event name */ |
678 | if (symbol) | 697 | if (symbol) |
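Taken together, the create_trace_probe() hunks above add a delete command: a leading '-' marks the request, the default group is assigned before the is_delete branch so a bare event name resolves against KPROBE_EVENT_SYSTEM, and the probe-point check now only applies to definitions. A hedged userspace sketch of issuing such a delete through the kprobe_events control file; the event name "myprobe" and the debugfs mount point are assumptions:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Sketch: clear a previously added probe event via the new '-' command. */
    int main(void)
    {
            const char cmd[] = "-:myprobe\n";   /* assumed event name */
            int fd = open("/sys/kernel/debug/tracing/kprobe_events", O_WRONLY);

            if (fd < 0) {
                    perror("kprobe_events");
                    return 1;
            }
            if (write(fd, cmd, strlen(cmd)) < 0)
                    perror("write");
            close(fd);
            return 0;
    }
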
@@ -1114,7 +1133,7 @@ static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1114 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1133 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
1115 | 1134 | ||
1116 | ret = trace_define_common_fields(event_call); | 1135 | ret = trace_define_common_fields(event_call); |
1117 | if (!ret) | 1136 | if (ret) |
1118 | return ret; | 1137 | return ret; |
1119 | 1138 | ||
1120 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 1139 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); |
@@ -1132,7 +1151,7 @@ static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | |||
1132 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | 1151 | struct trace_probe *tp = (struct trace_probe *)event_call->data; |
1133 | 1152 | ||
1134 | ret = trace_define_common_fields(event_call); | 1153 | ret = trace_define_common_fields(event_call); |
1135 | if (!ret) | 1154 | if (ret) |
1136 | return ret; | 1155 | return ret; |
1137 | 1156 | ||
1138 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); | 1157 | DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0); |
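The two define_fields hunks fix an inverted error check: trace_define_common_fields() returns 0 on success and a negative errno on failure, so the old test returned early on success and skipped every DEFINE_FIELD() call. A condensed sketch of the inversion; the surrounding code is as in the hunks:

    /* Before: 'ret == 0' (success) bailed out and the fields were never defined. */
    if (!ret)
            return ret;

    /*
     * After: only a real error is propagated; on success execution falls
     * through to the DEFINE_FIELD() calls for the probe-specific fields.
     */
    if (ret)
            return ret;
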
diff --git a/kernel/trace/trace_ksym.c b/kernel/trace/trace_ksym.c index ddfa0fd43bc0..acb87d4a4ac1 100644 --- a/kernel/trace/trace_ksym.c +++ b/kernel/trace/trace_ksym.c | |||
@@ -79,11 +79,12 @@ void ksym_collect_stats(unsigned long hbp_hit_addr) | |||
79 | } | 79 | } |
80 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ | 80 | #endif /* CONFIG_PROFILE_KSYM_TRACER */ |
81 | 81 | ||
82 | void ksym_hbp_handler(struct perf_event *hbp, void *data) | 82 | void ksym_hbp_handler(struct perf_event *hbp, int nmi, |
83 | struct perf_sample_data *data, | ||
84 | struct pt_regs *regs) | ||
83 | { | 85 | { |
84 | struct ring_buffer_event *event; | 86 | struct ring_buffer_event *event; |
85 | struct ksym_trace_entry *entry; | 87 | struct ksym_trace_entry *entry; |
86 | struct pt_regs *regs = data; | ||
87 | struct ring_buffer *buffer; | 88 | struct ring_buffer *buffer; |
88 | int pc; | 89 | int pc; |
89 | 90 | ||
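ksym_hbp_handler() is moved to the four-argument perf overflow-handler prototype, receiving the NMI flag, sample data and pt_regs directly instead of digging the registers out of an opaque void *data. A hedged sketch of a breakpoint callback with the new signature; the function name and body are illustrative:

    #include <linux/kernel.h>
    #include <linux/perf_event.h>
    #include <linux/ptrace.h>

    /* Sketch: a hw-breakpoint callback matching the new prototype. */
    static void sample_hbp_handler(struct perf_event *bp, int nmi,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
    {
            /* regs now arrives as a typed argument, not smuggled through 'data' */
            printk(KERN_INFO "breakpoint hit, ip=%lx\n", instruction_pointer(regs));
    }
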
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index b6c12c6a1bcd..8e46b3323cdc 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
@@ -23,13 +23,21 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | |||
23 | 23 | ||
24 | static int next_event_type = __TRACE_LAST_TYPE + 1; | 24 | static int next_event_type = __TRACE_LAST_TYPE + 1; |
25 | 25 | ||
26 | void trace_print_seq(struct seq_file *m, struct trace_seq *s) | 26 | int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
27 | { | 27 | { |
28 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; | 28 | int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len; |
29 | int ret; | ||
30 | |||
31 | ret = seq_write(m, s->buffer, len); | ||
29 | 32 | ||
30 | seq_write(m, s->buffer, len); | 33 | /* |
34 | * Only reset this buffer if we successfully wrote to the | ||
35 | * seq_file buffer. | ||
36 | */ | ||
37 | if (!ret) | ||
38 | trace_seq_init(s); | ||
31 | 39 | ||
32 | trace_seq_init(s); | 40 | return ret; |
33 | } | 41 | } |
34 | 42 | ||
35 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) | 43 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) |
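trace_print_seq() now reports the seq_write() result and only resets the trace_seq when the copy actually fit; on overflow the buffered text is kept so it can be emitted again once seq_file retries with a larger buffer. A hedged caller-side sketch of that contract, assuming the standard seq_write() return values:

    /*
     * Sketch of the new contract, caller side:
     *   ret == 0  -> everything was copied, the trace_seq was reset
     *   ret != 0  -> the seq_file buffer overflowed, the trace_seq is
     *                kept intact for the retry with a larger buffer
     */
    ret = trace_print_seq(m, &iter->seq);
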
@@ -85,7 +93,7 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | |||
85 | va_list ap; | 93 | va_list ap; |
86 | int ret; | 94 | int ret; |
87 | 95 | ||
88 | if (!len) | 96 | if (s->full || !len) |
89 | return 0; | 97 | return 0; |
90 | 98 | ||
91 | va_start(ap, fmt); | 99 | va_start(ap, fmt); |
@@ -93,8 +101,10 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | |||
93 | va_end(ap); | 101 | va_end(ap); |
94 | 102 | ||
95 | /* If we can't write it all, don't bother writing anything */ | 103 | /* If we can't write it all, don't bother writing anything */ |
96 | if (ret >= len) | 104 | if (ret >= len) { |
105 | s->full = 1; | ||
97 | return 0; | 106 | return 0; |
107 | } | ||
98 | 108 | ||
99 | s->len += ret; | 109 | s->len += ret; |
100 | 110 | ||
@@ -119,14 +129,16 @@ trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | |||
119 | int len = (PAGE_SIZE - 1) - s->len; | 129 | int len = (PAGE_SIZE - 1) - s->len; |
120 | int ret; | 130 | int ret; |
121 | 131 | ||
122 | if (!len) | 132 | if (s->full || !len) |
123 | return 0; | 133 | return 0; |
124 | 134 | ||
125 | ret = vsnprintf(s->buffer + s->len, len, fmt, args); | 135 | ret = vsnprintf(s->buffer + s->len, len, fmt, args); |
126 | 136 | ||
127 | /* If we can't write it all, don't bother writing anything */ | 137 | /* If we can't write it all, don't bother writing anything */ |
128 | if (ret >= len) | 138 | if (ret >= len) { |
139 | s->full = 1; | ||
129 | return 0; | 140 | return 0; |
141 | } | ||
130 | 142 | ||
131 | s->len += ret; | 143 | s->len += ret; |
132 | 144 | ||
@@ -139,14 +151,16 @@ int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | |||
139 | int len = (PAGE_SIZE - 1) - s->len; | 151 | int len = (PAGE_SIZE - 1) - s->len; |
140 | int ret; | 152 | int ret; |
141 | 153 | ||
142 | if (!len) | 154 | if (s->full || !len) |
143 | return 0; | 155 | return 0; |
144 | 156 | ||
145 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | 157 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); |
146 | 158 | ||
147 | /* If we can't write it all, don't bother writing anything */ | 159 | /* If we can't write it all, don't bother writing anything */ |
148 | if (ret >= len) | 160 | if (ret >= len) { |
161 | s->full = 1; | ||
149 | return 0; | 162 | return 0; |
163 | } | ||
150 | 164 | ||
151 | s->len += ret; | 165 | s->len += ret; |
152 | 166 | ||
@@ -167,8 +181,13 @@ int trace_seq_puts(struct trace_seq *s, const char *str) | |||
167 | { | 181 | { |
168 | int len = strlen(str); | 182 | int len = strlen(str); |
169 | 183 | ||
170 | if (len > ((PAGE_SIZE - 1) - s->len)) | 184 | if (s->full) |
185 | return 0; | ||
186 | |||
187 | if (len > ((PAGE_SIZE - 1) - s->len)) { | ||
188 | s->full = 1; | ||
171 | return 0; | 189 | return 0; |
190 | } | ||
172 | 191 | ||
173 | memcpy(s->buffer + s->len, str, len); | 192 | memcpy(s->buffer + s->len, str, len); |
174 | s->len += len; | 193 | s->len += len; |
@@ -178,9 +197,14 @@ int trace_seq_puts(struct trace_seq *s, const char *str) | |||
178 | 197 | ||
179 | int trace_seq_putc(struct trace_seq *s, unsigned char c) | 198 | int trace_seq_putc(struct trace_seq *s, unsigned char c) |
180 | { | 199 | { |
181 | if (s->len >= (PAGE_SIZE - 1)) | 200 | if (s->full) |
182 | return 0; | 201 | return 0; |
183 | 202 | ||
203 | if (s->len >= (PAGE_SIZE - 1)) { | ||
204 | s->full = 1; | ||
205 | return 0; | ||
206 | } | ||
207 | |||
184 | s->buffer[s->len++] = c; | 208 | s->buffer[s->len++] = c; |
185 | 209 | ||
186 | return 1; | 210 | return 1; |
@@ -188,9 +212,14 @@ int trace_seq_putc(struct trace_seq *s, unsigned char c) | |||
188 | 212 | ||
189 | int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) | 213 | int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) |
190 | { | 214 | { |
191 | if (len > ((PAGE_SIZE - 1) - s->len)) | 215 | if (s->full) |
192 | return 0; | 216 | return 0; |
193 | 217 | ||
218 | if (len > ((PAGE_SIZE - 1) - s->len)) { | ||
219 | s->full = 1; | ||
220 | return 0; | ||
221 | } | ||
222 | |||
194 | memcpy(s->buffer + s->len, mem, len); | 223 | memcpy(s->buffer + s->len, mem, len); |
195 | s->len += len; | 224 | s->len += len; |
196 | 225 | ||
@@ -203,6 +232,9 @@ int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len) | |||
203 | const unsigned char *data = mem; | 232 | const unsigned char *data = mem; |
204 | int i, j; | 233 | int i, j; |
205 | 234 | ||
235 | if (s->full) | ||
236 | return 0; | ||
237 | |||
206 | #ifdef __BIG_ENDIAN | 238 | #ifdef __BIG_ENDIAN |
207 | for (i = 0, j = 0; i < len; i++) { | 239 | for (i = 0, j = 0; i < len; i++) { |
208 | #else | 240 | #else |
@@ -220,8 +252,13 @@ void *trace_seq_reserve(struct trace_seq *s, size_t len) | |||
220 | { | 252 | { |
221 | void *ret; | 253 | void *ret; |
222 | 254 | ||
223 | if (len > ((PAGE_SIZE - 1) - s->len)) | 255 | if (s->full) |
256 | return 0; | ||
257 | |||
258 | if (len > ((PAGE_SIZE - 1) - s->len)) { | ||
259 | s->full = 1; | ||
224 | return NULL; | 260 | return NULL; |
261 | } | ||
225 | 262 | ||
226 | ret = s->buffer + s->len; | 263 | ret = s->buffer + s->len; |
227 | s->len += len; | 264 | s->len += len; |
@@ -233,8 +270,14 @@ int trace_seq_path(struct trace_seq *s, struct path *path) | |||
233 | { | 270 | { |
234 | unsigned char *p; | 271 | unsigned char *p; |
235 | 272 | ||
236 | if (s->len >= (PAGE_SIZE - 1)) | 273 | if (s->full) |
274 | return 0; | ||
275 | |||
276 | if (s->len >= (PAGE_SIZE - 1)) { | ||
277 | s->full = 1; | ||
237 | return 0; | 278 | return 0; |
279 | } | ||
280 | |||
238 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | 281 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); |
239 | if (!IS_ERR(p)) { | 282 | if (!IS_ERR(p)) { |
240 | p = mangle_path(s->buffer + s->len, p, "\n"); | 283 | p = mangle_path(s->buffer + s->len, p, "\n"); |
@@ -247,6 +290,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path) | |||
247 | return 1; | 290 | return 1; |
248 | } | 291 | } |
249 | 292 | ||
293 | s->full = 1; | ||
250 | return 0; | 294 | return 0; |
251 | } | 295 | } |
252 | 296 | ||
@@ -373,6 +417,9 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | |||
373 | unsigned long vmstart = 0; | 417 | unsigned long vmstart = 0; |
374 | int ret = 1; | 418 | int ret = 1; |
375 | 419 | ||
420 | if (s->full) | ||
421 | return 0; | ||
422 | |||
376 | if (mm) { | 423 | if (mm) { |
377 | const struct vm_area_struct *vma; | 424 | const struct vm_area_struct *vma; |
378 | 425 | ||
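The remaining trace_output.c hunks introduce the s->full overflow flag: the first trace_seq_* call whose output does not fit sets it, and every later call returns 0 immediately instead of appending a truncated fragment. Output handlers keep the usual "zero return means partial line" pattern; a minimal hedged sketch (the event name and format string are illustrative):

    /* Sketch: a typical output handler relying on the new s->full behaviour. */
    static enum print_line_t my_event_output(struct trace_iterator *iter)
    {
            struct trace_seq *s = &iter->seq;

            if (!trace_seq_printf(s, "my_event: cpu=%d\n", iter->cpu))
                    return TRACE_TYPE_PARTIAL_LINE; /* seq full, let the core retry */

            return TRACE_TYPE_HANDLED;
    }
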
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 67e526b6ae81..dee48658805c 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -68,6 +68,116 @@ struct workqueue_struct { | |||
68 | #endif | 68 | #endif |
69 | }; | 69 | }; |
70 | 70 | ||
71 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | ||
72 | |||
73 | static struct debug_obj_descr work_debug_descr; | ||
74 | |||
75 | /* | ||
76 | * fixup_init is called when: | ||
77 | * - an active object is initialized | ||
78 | */ | ||
79 | static int work_fixup_init(void *addr, enum debug_obj_state state) | ||
80 | { | ||
81 | struct work_struct *work = addr; | ||
82 | |||
83 | switch (state) { | ||
84 | case ODEBUG_STATE_ACTIVE: | ||
85 | cancel_work_sync(work); | ||
86 | debug_object_init(work, &work_debug_descr); | ||
87 | return 1; | ||
88 | default: | ||
89 | return 0; | ||
90 | } | ||
91 | } | ||
92 | |||
93 | /* | ||
94 | * fixup_activate is called when: | ||
95 | * - an active object is activated | ||
96 | * - an unknown object is activated (might be a statically initialized object) | ||
97 | */ | ||
98 | static int work_fixup_activate(void *addr, enum debug_obj_state state) | ||
99 | { | ||
100 | struct work_struct *work = addr; | ||
101 | |||
102 | switch (state) { | ||
103 | |||
104 | case ODEBUG_STATE_NOTAVAILABLE: | ||
105 | /* | ||
106 | * This is not really a fixup. The work struct was | ||
107 | * statically initialized. We just make sure that it | ||
108 | * is tracked in the object tracker. | ||
109 | */ | ||
110 | if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) { | ||
111 | debug_object_init(work, &work_debug_descr); | ||
112 | debug_object_activate(work, &work_debug_descr); | ||
113 | return 0; | ||
114 | } | ||
115 | WARN_ON_ONCE(1); | ||
116 | return 0; | ||
117 | |||
118 | case ODEBUG_STATE_ACTIVE: | ||
119 | WARN_ON(1); | ||
120 | |||
121 | default: | ||
122 | return 0; | ||
123 | } | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * fixup_free is called when: | ||
128 | * - an active object is freed | ||
129 | */ | ||
130 | static int work_fixup_free(void *addr, enum debug_obj_state state) | ||
131 | { | ||
132 | struct work_struct *work = addr; | ||
133 | |||
134 | switch (state) { | ||
135 | case ODEBUG_STATE_ACTIVE: | ||
136 | cancel_work_sync(work); | ||
137 | debug_object_free(work, &work_debug_descr); | ||
138 | return 1; | ||
139 | default: | ||
140 | return 0; | ||
141 | } | ||
142 | } | ||
143 | |||
144 | static struct debug_obj_descr work_debug_descr = { | ||
145 | .name = "work_struct", | ||
146 | .fixup_init = work_fixup_init, | ||
147 | .fixup_activate = work_fixup_activate, | ||
148 | .fixup_free = work_fixup_free, | ||
149 | }; | ||
150 | |||
151 | static inline void debug_work_activate(struct work_struct *work) | ||
152 | { | ||
153 | debug_object_activate(work, &work_debug_descr); | ||
154 | } | ||
155 | |||
156 | static inline void debug_work_deactivate(struct work_struct *work) | ||
157 | { | ||
158 | debug_object_deactivate(work, &work_debug_descr); | ||
159 | } | ||
160 | |||
161 | void __init_work(struct work_struct *work, int onstack) | ||
162 | { | ||
163 | if (onstack) | ||
164 | debug_object_init_on_stack(work, &work_debug_descr); | ||
165 | else | ||
166 | debug_object_init(work, &work_debug_descr); | ||
167 | } | ||
168 | EXPORT_SYMBOL_GPL(__init_work); | ||
169 | |||
170 | void destroy_work_on_stack(struct work_struct *work) | ||
171 | { | ||
172 | debug_object_free(work, &work_debug_descr); | ||
173 | } | ||
174 | EXPORT_SYMBOL_GPL(destroy_work_on_stack); | ||
175 | |||
176 | #else | ||
177 | static inline void debug_work_activate(struct work_struct *work) { } | ||
178 | static inline void debug_work_deactivate(struct work_struct *work) { } | ||
179 | #endif | ||
180 | |||
71 | /* Serializes the accesses to the list of workqueues. */ | 181 | /* Serializes the accesses to the list of workqueues. */ |
72 | static DEFINE_SPINLOCK(workqueue_lock); | 182 | static DEFINE_SPINLOCK(workqueue_lock); |
73 | static LIST_HEAD(workqueues); | 183 | static LIST_HEAD(workqueues); |
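The block above hooks work_structs into the debugobjects framework under CONFIG_DEBUG_OBJECTS_WORK: the fixup callbacks recover from initialising or freeing an active object and from activating a statically initialised one, while __init_work()/destroy_work_on_stack() let on-stack work items be tracked for their real lifetime. A hedged sketch of how an on-stack user is expected to pair these calls; the function and variable names are illustrative:

    #include <linux/workqueue.h>

    static void my_work_fn(struct work_struct *work)
    {
            /* ... the deferred work ... */
    }

    /* Sketch: on-stack work item under CONFIG_DEBUG_OBJECTS_WORK. */
    static void run_on_stack_work(struct workqueue_struct *wq)
    {
            struct work_struct work;

            INIT_WORK_ON_STACK(&work, my_work_fn); /* on-stack init, tracked by debugobjects */
            queue_work(wq, &work);
            flush_work(&work);                     /* wait until my_work_fn() has run */
            destroy_work_on_stack(&work);          /* release the tracking entry before return */
    }
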
@@ -145,6 +255,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, | |||
145 | { | 255 | { |
146 | unsigned long flags; | 256 | unsigned long flags; |
147 | 257 | ||
258 | debug_work_activate(work); | ||
148 | spin_lock_irqsave(&cwq->lock, flags); | 259 | spin_lock_irqsave(&cwq->lock, flags); |
149 | insert_work(cwq, work, &cwq->worklist); | 260 | insert_work(cwq, work, &cwq->worklist); |
150 | spin_unlock_irqrestore(&cwq->lock, flags); | 261 | spin_unlock_irqrestore(&cwq->lock, flags); |
@@ -280,6 +391,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
280 | struct lockdep_map lockdep_map = work->lockdep_map; | 391 | struct lockdep_map lockdep_map = work->lockdep_map; |
281 | #endif | 392 | #endif |
282 | trace_workqueue_execution(cwq->thread, work); | 393 | trace_workqueue_execution(cwq->thread, work); |
394 | debug_work_deactivate(work); | ||
283 | cwq->current_work = work; | 395 | cwq->current_work = work; |
284 | list_del_init(cwq->worklist.next); | 396 | list_del_init(cwq->worklist.next); |
285 | spin_unlock_irq(&cwq->lock); | 397 | spin_unlock_irq(&cwq->lock); |
@@ -350,11 +462,18 @@ static void wq_barrier_func(struct work_struct *work) | |||
350 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, | 462 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, |
351 | struct wq_barrier *barr, struct list_head *head) | 463 | struct wq_barrier *barr, struct list_head *head) |
352 | { | 464 | { |
353 | INIT_WORK(&barr->work, wq_barrier_func); | 465 | /* |
466 | * debugobject calls are safe here even with cwq->lock locked | ||
467 | * as we know for sure that this will not trigger any of the | ||
468 | * checks and call back into the fixup functions where we | ||
469 | * might deadlock. | ||
470 | */ | ||
471 | INIT_WORK_ON_STACK(&barr->work, wq_barrier_func); | ||
354 | __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); | 472 | __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work)); |
355 | 473 | ||
356 | init_completion(&barr->done); | 474 | init_completion(&barr->done); |
357 | 475 | ||
476 | debug_work_activate(&barr->work); | ||
358 | insert_work(cwq, &barr->work, head); | 477 | insert_work(cwq, &barr->work, head); |
359 | } | 478 | } |
360 | 479 | ||
@@ -372,8 +491,10 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq) | |||
372 | } | 491 | } |
373 | spin_unlock_irq(&cwq->lock); | 492 | spin_unlock_irq(&cwq->lock); |
374 | 493 | ||
375 | if (active) | 494 | if (active) { |
376 | wait_for_completion(&barr.done); | 495 | wait_for_completion(&barr.done); |
496 | destroy_work_on_stack(&barr.work); | ||
497 | } | ||
377 | 498 | ||
378 | return active; | 499 | return active; |
379 | } | 500 | } |
@@ -451,6 +572,7 @@ out: | |||
451 | return 0; | 572 | return 0; |
452 | 573 | ||
453 | wait_for_completion(&barr.done); | 574 | wait_for_completion(&barr.done); |
575 | destroy_work_on_stack(&barr.work); | ||
454 | return 1; | 576 | return 1; |
455 | } | 577 | } |
456 | EXPORT_SYMBOL_GPL(flush_work); | 578 | EXPORT_SYMBOL_GPL(flush_work); |
@@ -485,6 +607,7 @@ static int try_to_grab_pending(struct work_struct *work) | |||
485 | */ | 607 | */ |
486 | smp_rmb(); | 608 | smp_rmb(); |
487 | if (cwq == get_wq_data(work)) { | 609 | if (cwq == get_wq_data(work)) { |
610 | debug_work_deactivate(work); | ||
488 | list_del_init(&work->entry); | 611 | list_del_init(&work->entry); |
489 | ret = 1; | 612 | ret = 1; |
490 | } | 613 | } |
@@ -507,8 +630,10 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq, | |||
507 | } | 630 | } |
508 | spin_unlock_irq(&cwq->lock); | 631 | spin_unlock_irq(&cwq->lock); |
509 | 632 | ||
510 | if (unlikely(running)) | 633 | if (unlikely(running)) { |
511 | wait_for_completion(&barr.done); | 634 | wait_for_completion(&barr.done); |
635 | destroy_work_on_stack(&barr.work); | ||
636 | } | ||
512 | } | 637 | } |
513 | 638 | ||
514 | static void wait_on_work(struct work_struct *work) | 639 | static void wait_on_work(struct work_struct *work) |
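Each flush/cancel path above follows the same barrier lifetime: the on-stack wq_barrier is initialised with INIT_WORK_ON_STACK() inside insert_wq_barrier(), and destroy_work_on_stack() is called right after wait_for_completion() so debugobjects never sees the tracked object outlive its stack frame. Condensed into one hedged sketch of the recurring pattern, with locking shown as in flush_cpu_workqueue():

    /* Sketch: lifetime of the on-stack barrier used by the flush paths above. */
    struct wq_barrier barr;

    spin_lock_irq(&cwq->lock);
    insert_wq_barrier(cwq, &barr, &cwq->worklist);  /* INIT_WORK_ON_STACK() + debug_work_activate() */
    spin_unlock_irq(&cwq->lock);

    wait_for_completion(&barr.done);                /* the barrier work has run */
    destroy_work_on_stack(&barr.work);              /* drop the debugobjects tracking entry */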