Diffstat (limited to 'kernel')
-rw-r--r--  kernel/capability.c          | 24
-rw-r--r--  kernel/events/core.c         |  4
-rw-r--r--  kernel/events/internal.h     |  2
-rw-r--r--  kernel/events/ring_buffer.c  | 22
-rw-r--r--  kernel/hrtimer.c             |  3
-rw-r--r--  kernel/kthread.c             | 52
-rw-r--r--  kernel/sched/clock.c         | 26
-rw-r--r--  kernel/sched/core.c          |  8
-rw-r--r--  kernel/sched/cputime.c       |  2
-rw-r--r--  kernel/smpboot.c             | 14
-rw-r--r--  kernel/sys.c                 |  3
-rw-r--r--  kernel/trace/ftrace.c        | 41
-rw-r--r--  kernel/trace/trace.c         |  4
-rw-r--r--  kernel/trace/trace_stack.c   |  2
14 files changed, 144 insertions, 63 deletions
diff --git a/kernel/capability.c b/kernel/capability.c
index 493d97259484..f6c2ce5701e1 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -393,6 +393,30 @@ bool ns_capable(struct user_namespace *ns, int cap)
 EXPORT_SYMBOL(ns_capable);
 
 /**
+ * file_ns_capable - Determine if the file's opener had a capability in effect
+ * @file: The file we want to check
+ * @ns: The usernamespace we want the capability in
+ * @cap: The capability to be tested for
+ *
+ * Return true if task that opened the file had a capability in effect
+ * when the file was opened.
+ *
+ * This does not set PF_SUPERPRIV because the caller may not
+ * actually be privileged.
+ */
+bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap)
+{
+	if (WARN_ON_ONCE(!cap_valid(cap)))
+		return false;
+
+	if (security_capable(file->f_cred, ns, cap) == 0)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(file_ns_capable);
+
+/**
  * capable - Determine if the current task has a superior capability in effect
  * @cap: The capability to be tested for
  *
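The new file_ns_capable() judges privilege against the credentials recorded in file->f_cred when the file was opened, not the caller's current credentials. A minimal runnable userspace model of that idea (illustrative only; the struct layout and helper names here are invented, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct cred { unsigned long cap_effective; };
struct file { struct cred f_cred; };   /* credential snapshot taken at open time */

#define CAP_NET_ADMIN 12
#define CAP_VALID(c)  ((c) >= 0 && (c) < 64)

static bool file_cap_ok(const struct file *file, int cap)
{
	if (!CAP_VALID(cap))
		return false;
	return file->f_cred.cap_effective & (1UL << cap);
}

int main(void)
{
	struct file f = { .f_cred = { .cap_effective = 1UL << CAP_NET_ADMIN } };

	/* Even if the caller dropped CAP_NET_ADMIN after open(), the
	 * check still passes: it consults the opener's credentials. */
	printf("opener had CAP_NET_ADMIN: %s\n",
	       file_cap_ok(&f, CAP_NET_ADMIN) ? "yes" : "no");
	return 0;
}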
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 59412d037eed..7e0962ed7f8a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4737,7 +4737,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	} else {
 		if (arch_vma_name(mmap_event->vma)) {
 			name = strncpy(tmp, arch_vma_name(mmap_event->vma),
-				       sizeof(tmp));
+				       sizeof(tmp) - 1);
+			tmp[sizeof(tmp) - 1] = '\0';
 			goto got_name;
 		}
 
@@ -5986,6 +5987,7 @@ skip_type:
 	if (pmu->pmu_cpu_context)
 		goto got_cpu_context;
 
+	ret = -ENOMEM;
 	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
 	if (!pmu->pmu_cpu_context)
 		goto free_dev;
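The first hunk fixes a classic strncpy() pitfall: when the source is at least as long as the destination, strncpy() fills the buffer without a terminating NUL. A short runnable illustration of the broken and fixed patterns (userspace sketch, not the perf code itself):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char tmp[8];
	const char *name = "very_long_vma_name";

	/* Unsafe: strlen(name) >= sizeof(tmp), so tmp gets no NUL. */
	strncpy(tmp, name, sizeof(tmp));

	/* Fixed pattern, mirroring the patch: leave room, terminate by hand. */
	strncpy(tmp, name, sizeof(tmp) - 1);
	tmp[sizeof(tmp) - 1] = '\0';
	printf("truncated copy: \"%s\"\n", tmp);   /* prints "very_lo" */
	return 0;
}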
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index d56a64c99a8b..eb675c4d59df 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -16,7 +16,7 @@ struct ring_buffer {
 	int				page_order;	/* allocation order  */
 #endif
 	int				nr_pages;	/* nr of data pages  */
-	int				writable;	/* are we writable */
+	int				overwrite;	/* can overwrite itself */
 
 	atomic_t			poll;		/* POLL_ for wakeups */
 
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 23cb34ff3973..97fddb09762b 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -18,12 +18,24 @@
 static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
 			      unsigned long offset, unsigned long head)
 {
-	unsigned long mask;
+	unsigned long sz = perf_data_size(rb);
+	unsigned long mask = sz - 1;
 
-	if (!rb->writable)
+	/*
+	 * check if user-writable
+	 * overwrite : over-write its own tail
+	 * !overwrite: buffer possibly drops events.
+	 */
+	if (rb->overwrite)
 		return true;
 
-	mask = perf_data_size(rb) - 1;
+	/*
+	 * verify that payload is not bigger than buffer
+	 * otherwise masking logic may fail to detect
+	 * the "not enough space" condition
+	 */
+	if ((head - offset) > sz)
+		return false;
 
 	offset = (offset - tail) & mask;
 	head   = (head - tail) & mask;
@@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
 		rb->watermark = max_size / 2;
 
 	if (flags & RING_BUFFER_WRITABLE)
-		rb->writable = 1;
+		rb->overwrite = 0;
+	else
+		rb->overwrite = 1;
 
 	atomic_set(&rb->refcount, 1);
 
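The added (head - offset) > sz guard matters because the free-space arithmetic is done modulo the power-of-two buffer size: a record larger than the whole buffer wraps around and can look like it fits. A simplified runnable model of the check (the real perf_output_space() applies further tail tests after the masking; the final acceptance test below is a deliberate simplification):

#include <stdbool.h>
#include <stdio.h>

static bool output_space(unsigned long sz, unsigned long tail,
			 unsigned long offset, unsigned long head,
			 bool guard)
{
	unsigned long mask = sz - 1;           /* sz must be a power of two */

	if (guard && (head - offset) > sz)     /* payload larger than buffer */
		return false;

	/* wraparound-safe distances from tail, modulo buffer size */
	offset = (offset - tail) & mask;
	head   = (head - tail) & mask;
	return head >= offset;                 /* simplified acceptance test */
}

int main(void)
{
	/* A 20-byte record in a 16-byte buffer: masking folds head to 4,
	 * so without the guard the record appears to fit. */
	printf("without guard: fits = %d\n", output_space(16, 0, 0, 20, false));
	printf("with guard:    fits = %d\n", output_space(16, 0, 0, 20, true));
	return 0;
}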
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cc47812d3feb..14be27feda49 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -63,6 +63,7 @@
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
 
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
 	.clock_base =
 	{
 		{
@@ -1642,8 +1643,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	raw_spin_lock_init(&cpu_base->lock);
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 		timerqueue_init_head(&cpu_base->clock_base[i].active);
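The change replaces a runtime raw_spin_lock_init() with a static initializer, so the per-CPU base lock is valid before any CPU bring-up code can touch it. The same idiom exists in userspace pthreads, shown here as a rough analogy rather than kernel code:

#include <pthread.h>
#include <stdio.h>

/* Valid immediately, even from constructors or early threads;
 * no init call can be "missed" before first use. */
static pthread_mutex_t early_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	pthread_mutex_lock(&early_lock);
	puts("lock usable without a runtime init step");
	pthread_mutex_unlock(&early_lock);
	return 0;
}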
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 691dc2ef9baf..9eb7fed0bbaa 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -124,12 +124,12 @@ void *kthread_data(struct task_struct *task)
 
 static void __kthread_parkme(struct kthread *self)
 {
-	__set_current_state(TASK_INTERRUPTIBLE);
+	__set_current_state(TASK_PARKED);
 	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
 		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
 			complete(&self->parked);
 		schedule();
-		__set_current_state(TASK_INTERRUPTIBLE);
+		__set_current_state(TASK_PARKED);
 	}
 	clear_bit(KTHREAD_IS_PARKED, &self->flags);
 	__set_current_state(TASK_RUNNING);
@@ -256,8 +256,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
 }
 EXPORT_SYMBOL(kthread_create_on_node);
 
-static void __kthread_bind(struct task_struct *p, unsigned int cpu)
+static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
 {
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, state)) {
+		WARN_ON(1);
+		return;
+	}
 	/* It's safe because the task is inactive. */
 	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
@@ -274,12 +279,7 @@ static void __kthread_bind(struct task_struct *p, unsigned int cpu)
  */
 void kthread_bind(struct task_struct *p, unsigned int cpu)
 {
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	__kthread_bind(p, cpu);
+	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(kthread_bind);
 
@@ -324,6 +324,22 @@ static struct kthread *task_get_live_kthread(struct task_struct *k)
 	return NULL;
 }
 
+static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
+{
+	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+	/*
+	 * We clear the IS_PARKED bit here as we don't wait
+	 * until the task has left the park code. So if we'd
+	 * park before that happens we'd see the IS_PARKED bit
+	 * which might be about to be cleared.
+	 */
+	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
+		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
+			__kthread_bind(k, kthread->cpu, TASK_PARKED);
+		wake_up_state(k, TASK_PARKED);
+	}
+}
+
 /**
  * kthread_unpark - unpark a thread created by kthread_create().
  * @k: thread created by kthread_create().
@@ -336,20 +352,8 @@ void kthread_unpark(struct task_struct *k)
 {
 	struct kthread *kthread = task_get_live_kthread(k);
 
-	if (kthread) {
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
-		/*
-		 * We clear the IS_PARKED bit here as we don't wait
-		 * until the task has left the park code. So if we'd
-		 * park before that happens we'd see the IS_PARKED bit
-		 * which might be about to be cleared.
-		 */
-		if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
-			if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
-				__kthread_bind(k, kthread->cpu);
-			wake_up_process(k);
-		}
-	}
+	if (kthread)
+		__kthread_unpark(k, kthread);
 	put_task_struct(k);
 }
 
@@ -407,7 +411,7 @@ int kthread_stop(struct task_struct *k)
 	trace_sched_kthread_stop(k);
 	if (kthread) {
 		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
-		clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+		__kthread_unpark(k, kthread);
 		wake_up_process(k);
 		wait_for_completion(&kthread->exited);
 	}
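The protocol above: the thread parks itself in a dedicated TASK_PARKED state, the unparker clears KTHREAD_SHOULD_PARK before waking, and wake_up_state(k, TASK_PARKED) wakes the task only out of that state. A loose, runnable userspace analogy using a mutex/condvar pair (illustrative; pthreads has no wake-by-state, so flags stand in for the state bits):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool should_park = true, is_parked;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (should_park) {                 /* __kthread_parkme() loop */
		is_parked = true;
		pthread_cond_broadcast(&cond);   /* complete(&self->parked) */
		pthread_cond_wait(&cond, &lock); /* sleep in "parked" state */
	}
	is_parked = false;
	pthread_mutex_unlock(&lock);
	puts("worker unparked, running again");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);

	/* Wait until the worker has really parked... */
	pthread_mutex_lock(&lock);
	while (!is_parked)
		pthread_cond_wait(&cond, &lock);
	/* ...then unpark: clear the flag *before* waking, as in
	 * __kthread_unpark(). */
	should_park = false;
	pthread_cond_broadcast(&cond);
	pthread_mutex_unlock(&lock);

	pthread_join(t, NULL);
	return 0;
}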
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c685e31492df..c3ae1446461c 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
 	u64 this_clock, remote_clock;
 	u64 *ptr, old_val, val;
 
+#if BITS_PER_LONG != 64
+again:
+	/*
+	 * Careful here: The local and the remote clock values need to
+	 * be read out atomic as we need to compare the values and
+	 * then update either the local or the remote side. So the
+	 * cmpxchg64 below only protects one readout.
+	 *
+	 * We must reread via sched_clock_local() in the retry case on
+	 * 32bit as an NMI could use sched_clock_local() via the
+	 * tracer and hit between the readout of
+	 * the low32bit and the high 32bit portion.
+	 */
+	this_clock = sched_clock_local(my_scd);
+	/*
+	 * We must enforce atomic readout on 32bit, otherwise the
+	 * update on the remote cpu can hit inbetween the readout of
+	 * the low32bit and the high 32bit portion.
+	 */
+	remote_clock = cmpxchg64(&scd->clock, 0, 0);
+#else
+	/*
+	 * On 64bit the read of [my]scd->clock is atomic versus the
+	 * update, so we can avoid the above 32bit dance.
+	 */
 	sched_clock_local(my_scd);
 again:
 	this_clock = my_scd->clock;
 	remote_clock = scd->clock;
+#endif
 
 	/*
 	 * Use the opportunity that we have both locks
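The 32-bit path reads the 64-bit clock with cmpxchg64(&scd->clock, 0, 0): a compare-and-swap that almost always fails but returns the full 64-bit value in one atomic operation, so a concurrent update can never produce a torn low/high read. A runnable userspace equivalent using the GCC/Clang __sync builtin (variable and helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint64_t clock_val = 0x1122334455667788ULL;

/* Atomic 64-bit read, even on 32-bit targets: CAS(v, 0, 0) returns the
 * current value, and only stores if the value happened to be 0. */
static uint64_t atomic_read64(uint64_t *v)
{
	return __sync_val_compare_and_swap(v, 0, 0);
}

int main(void)
{
	printf("clock = %#llx\n",
	       (unsigned long long)atomic_read64(&clock_val));
	return 0;
}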
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7f12624a393c..67d04651f44b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1498,8 +1498,10 @@ static void try_to_wake_up_local(struct task_struct *p)
 {
 	struct rq *rq = task_rq(p);
 
-	BUG_ON(rq != this_rq());
-	BUG_ON(p == current);
+	if (WARN_ON_ONCE(rq != this_rq()) ||
+	    WARN_ON_ONCE(p == current))
+		return;
+
 	lockdep_assert_held(&rq->lock);
 
 	if (!raw_spin_trylock(&p->pi_lock)) {
@@ -4999,7 +5001,7 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
 }
 
 static int min_load_idx = 0;
-static int max_load_idx = CPU_LOAD_IDX_MAX;
+static int max_load_idx = CPU_LOAD_IDX_MAX-1;
 
 static void
 set_table_entry(struct ctl_table *entry,
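The second hunk is an off-by-one fix: the sysctl range check is inclusive, but the value indexes cpu_load[CPU_LOAD_IDX_MAX], so the largest valid setting is CPU_LOAD_IDX_MAX - 1. A minimal standalone illustration (constants invented for the sketch):

#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

static int cpu_load[CPU_LOAD_IDX_MAX];

static int load_idx_valid(int idx, int max_inclusive)
{
	return idx >= 0 && idx <= max_inclusive;
}

int main(void)
{
	/* With max = CPU_LOAD_IDX_MAX, idx == 5 passes validation but
	 * cpu_load[5] is out of bounds; the bound must be MAX - 1. */
	int idx = CPU_LOAD_IDX_MAX;

	(void)cpu_load;
	printf("idx %d accepted with wrong bound: %s\n", idx,
	       load_idx_valid(idx, CPU_LOAD_IDX_MAX) ? "yes" : "no");
	printf("idx %d accepted with fixed bound: %s\n", idx,
	       load_idx_valid(idx, CPU_LOAD_IDX_MAX - 1) ? "yes" : "no");
	return 0;
}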
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index ed12cbb135f4..e93cca92f38b 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -310,7 +310,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
 	t = tsk;
 	do {
-		task_cputime(tsk, &utime, &stime);
+		task_cputime(t, &utime, &stime);
 		times->utime += utime;
 		times->stime += stime;
 		times->sum_exec_runtime += task_sched_runtime(t);
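This one-character fix is the classic wrong-loop-variable bug: the loop walks t across the thread group but kept sampling tsk, so the group leader's times were added once per thread. A runnable miniature of the same mistake (plain C, illustrative types):

#include <stdio.h>

struct task { int utime; struct task *next; };

int main(void)
{
	struct task t3 = { 3, NULL }, t2 = { 2, &t3 }, tsk = { 1, &t2 };
	int buggy = 0, fixed = 0;

	for (struct task *t = &tsk; t; t = t->next) {
		buggy += tsk.utime;  /* always the first task: 1+1+1 = 3 */
		fixed += t->utime;   /* the current task:       1+2+3 = 6 */
	}
	printf("buggy sum = %d, fixed sum = %d\n", buggy, fixed);
	return 0;
}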
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 8eaed9aa9cf0..02fc5c933673 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -185,8 +185,18 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
 	}
 	get_task_struct(tsk);
 	*per_cpu_ptr(ht->store, cpu) = tsk;
-	if (ht->create)
-		ht->create(cpu);
+	if (ht->create) {
+		/*
+		 * Make sure that the task has actually scheduled out
+		 * into park position, before calling the create
+		 * callback. At least the migration thread callback
+		 * requires that the task is off the runqueue.
+		 */
+		if (!wait_task_inactive(tsk, TASK_PARKED))
+			WARN_ON(1);
+		else
+			ht->create(cpu);
+	}
 	return 0;
 }
 
diff --git a/kernel/sys.c b/kernel/sys.c
index 39c9c4a2949f..0da73cf73e60 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -324,7 +324,6 @@ void kernel_restart_prepare(char *cmd)
 	system_state = SYSTEM_RESTART;
 	usermodehelper_disable();
 	device_shutdown();
-	syscore_shutdown();
 }
 
 /**
@@ -370,6 +369,7 @@ void kernel_restart(char *cmd)
 {
 	kernel_restart_prepare(cmd);
 	disable_nonboot_cpus();
+	syscore_shutdown();
 	if (!cmd)
 		printk(KERN_EMERG "Restarting system.\n");
 	else
@@ -395,6 +395,7 @@ static void kernel_shutdown_prepare(enum system_states state)
 void kernel_halt(void)
 {
 	kernel_shutdown_prepare(SYSTEM_HALT);
+	disable_nonboot_cpus();
 	syscore_shutdown();
 	printk(KERN_EMERG "System halted.\n");
 	kmsg_dump(KMSG_DUMP_HALT);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 7e897106b7e0..b3fde6d7b7fc 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -694,7 +694,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
 		free_page(tmp);
 	}
 
-	free_page((unsigned long)stat->pages);
 	stat->pages = NULL;
 	stat->start = NULL;
 
@@ -1053,6 +1052,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
 
 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
+loff_t
+ftrace_filter_lseek(struct file *file, loff_t offset, int whence)
+{
+	loff_t ret;
+
+	if (file->f_mode & FMODE_READ)
+		ret = seq_lseek(file, offset, whence);
+	else
+		file->f_pos = ret = 1;
+
+	return ret;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
@@ -2613,7 +2625,7 @@ static void ftrace_filter_reset(struct ftrace_hash *hash)
  * routine, you can use ftrace_filter_write() for the write
  * routine if @flag has FTRACE_ITER_FILTER set, or
  * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
- * ftrace_regex_lseek() should be used as the lseek routine, and
+ * ftrace_filter_lseek() should be used as the lseek routine, and
  * release must call ftrace_regex_release().
 */
 int
@@ -2697,19 +2709,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file)
 				 inode, file);
 }
 
-loff_t
-ftrace_regex_lseek(struct file *file, loff_t offset, int whence)
-{
-	loff_t ret;
-
-	if (file->f_mode & FMODE_READ)
-		ret = seq_lseek(file, offset, whence);
-	else
-		file->f_pos = ret = 1;
-
-	return ret;
-}
-
 static int ftrace_match(char *str, char *regex, int len, int type)
 {
 	int matched = 0;
@@ -3441,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
 static int __init set_ftrace_notrace(char *str)
 {
-	strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+	strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
 __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
-	strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+	strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
 	return 1;
 }
 __setup("ftrace_filter=", set_ftrace_filter);
@@ -3571,7 +3570,7 @@ static const struct file_operations ftrace_filter_fops = {
 	.open = ftrace_filter_open,
 	.read = seq_read,
 	.write = ftrace_filter_write,
-	.llseek = ftrace_regex_lseek,
+	.llseek = ftrace_filter_lseek,
 	.release = ftrace_regex_release,
 };
 
@@ -3579,7 +3578,7 @@ static const struct file_operations ftrace_notrace_fops = {
 	.open = ftrace_notrace_open,
 	.read = seq_read,
 	.write = ftrace_notrace_write,
-	.llseek = ftrace_regex_lseek,
+	.llseek = ftrace_filter_lseek,
 	.release = ftrace_regex_release,
 };
 
@@ -3784,8 +3783,8 @@ static const struct file_operations ftrace_graph_fops = {
 	.open		= ftrace_graph_open,
 	.read		= seq_read,
 	.write		= ftrace_graph_write,
+	.llseek		= ftrace_filter_lseek,
 	.release	= ftrace_graph_release,
-	.llseek		= seq_lseek,
 };
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
@@ -4440,7 +4439,7 @@ static const struct file_operations ftrace_pid_fops = {
 	.open		= ftrace_pid_open,
 	.write		= ftrace_pid_write,
 	.read		= seq_read,
-	.llseek		= seq_lseek,
+	.llseek		= ftrace_filter_lseek,
 	.release	= ftrace_pid_release,
 };
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 7ba7fc76f9eb..66338c4f7f4b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -132,7 +132,7 @@ static char *default_bootup_tracer;
 
 static int __init set_cmdline_ftrace(char *str)
 {
-	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
+	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
 	ring_buffer_expanded = 1;
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;
 
 static int __init set_trace_boot_options(char *str)
 {
-	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
 	trace_boot_options = trace_boot_options_buf;
 	return 0;
 }
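These hunks, like the ftrace.c ones above, switch boot-parameter copies from strncpy() to strlcpy(), which always NUL-terminates within the given size. strlcpy() is a BSD/kernel API not provided by glibc, so this runnable sketch carries a minimal reimplementation purely for illustration:

#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';     /* always terminated, unlike strncpy() */
	}
	return len;                /* total length it tried to create */
}

int main(void)
{
	char buf[8];

	my_strlcpy(buf, "function_graph", sizeof(buf));
	printf("always terminated: \"%s\"\n", buf);   /* "functio" */
	return 0;
}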
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 42ca822fc701..83a8b5b7bd35 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -322,7 +322,7 @@ static const struct file_operations stack_trace_filter_fops = {
 	.open = stack_trace_filter_open,
 	.read = seq_read,
 	.write = ftrace_filter_write,
-	.llseek = ftrace_regex_lseek,
+	.llseek = ftrace_filter_lseek,
 	.release = ftrace_regex_release,
 };
 