| field | value | |
|---|---|---|
| author | Wolfram Sang <wsa@the-dreams.de> | 2018-05-09 11:40:44 -0400 |
| committer | Wolfram Sang <wsa@the-dreams.de> | 2018-05-09 11:40:44 -0400 |
| commit | 4c0330208f6bfe5285bae239fdb2a7ec1ce1f78d | (patch) |
| tree | 257c871b0be4c7eafa0e6911ab596bfb0f51a104 | /kernel |
| parent | 7781edaed63e9396fc913e0899cb197562e6f1a0 | (diff) |
| parent | 76aa3de7095f15af7300012cb29ea8ab93eec348 | (diff) |
Merge tag 'at24-4.17-rc5-fixes-for-wolfram' of git://git.kernel.org/pub/scm/linux/kernel/git/brgl/linux into i2c/for-current
Single bug-fix for a regression introduced during the 4.17 merge window.
Diffstat (limited to 'kernel')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/bpf/arraymap.c | 3 |
| -rw-r--r-- | kernel/bpf/sockmap.c | 99 |
| -rw-r--r-- | kernel/bpf/syscall.c | 4 |
| -rw-r--r-- | kernel/events/uprobes.c | 7 |
| -rw-r--r-- | kernel/time/clocksource.c | 63 |
| -rw-r--r-- | kernel/trace/ftrace.c | 4 |
| -rw-r--r-- | kernel/trace/trace_events_hist.c | 12 |
| -rw-r--r-- | kernel/trace/trace_stack.c | 2 |
| -rw-r--r-- | kernel/trace/trace_uprobe.c | 35 |
| -rw-r--r-- | kernel/tracepoint.c | 4 |
10 files changed, 155 insertions, 78 deletions
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 14750e7c5ee4..027107f4be53 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -476,7 +476,7 @@ static u32 prog_fd_array_sys_lookup_elem(void *ptr)
 }
 
 /* decrement refcnt of all bpf_progs that are stored in this map */
-void bpf_fd_array_map_clear(struct bpf_map *map)
+static void bpf_fd_array_map_clear(struct bpf_map *map)
 {
         struct bpf_array *array = container_of(map, struct bpf_array, map);
         int i;
@@ -495,6 +495,7 @@ const struct bpf_map_ops prog_array_map_ops = {
         .map_fd_get_ptr = prog_fd_array_get_ptr,
         .map_fd_put_ptr = prog_fd_array_put_ptr,
         .map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
+        .map_release_uref = bpf_fd_array_map_clear,
 };
 
 static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index a3b21385e947..098eca568c2b 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -43,6 +43,7 @@
 #include <net/tcp.h>
 #include <linux/ptr_ring.h>
 #include <net/inet_common.h>
+#include <linux/sched/signal.h>
 
 #define SOCK_CREATE_FLAG_MASK \
         (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)
@@ -325,6 +326,9 @@ retry:
         if (ret > 0) {
                 if (apply)
                         apply_bytes -= ret;
+
+                sg->offset += ret;
+                sg->length -= ret;
                 size -= ret;
                 offset += ret;
                 if (uncharge)
@@ -332,8 +336,6 @@ retry:
                 goto retry;
         }
 
-        sg->length = size;
-        sg->offset = offset;
         return ret;
 }
 
@@ -391,7 +393,8 @@ static void return_mem_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
         } while (i != md->sg_end);
 }
 
-static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
+static void free_bytes_sg(struct sock *sk, int bytes,
+                          struct sk_msg_buff *md, bool charge)
 {
         struct scatterlist *sg = md->sg_data;
         int i = md->sg_start, free;
@@ -401,11 +404,13 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
                 if (bytes < free) {
                         sg[i].length -= bytes;
                         sg[i].offset += bytes;
-                        sk_mem_uncharge(sk, bytes);
+                        if (charge)
+                                sk_mem_uncharge(sk, bytes);
                         break;
                 }
 
-                sk_mem_uncharge(sk, sg[i].length);
+                if (charge)
+                        sk_mem_uncharge(sk, sg[i].length);
                 put_page(sg_page(&sg[i]));
                 bytes -= sg[i].length;
                 sg[i].length = 0;
@@ -416,6 +421,7 @@ static void free_bytes_sg(struct sock *sk, int bytes, struct sk_msg_buff *md)
                 if (i == MAX_SKB_FRAGS)
                         i = 0;
         }
+        md->sg_start = i;
 }
 
 static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
@@ -523,8 +529,6 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
         i = md->sg_start;
 
         do {
-                r->sg_data[i] = md->sg_data[i];
-
                 size = (apply && apply_bytes < md->sg_data[i].length) ?
                         apply_bytes : md->sg_data[i].length;
 
@@ -535,6 +539,7 @@ static int bpf_tcp_ingress(struct sock *sk, int apply_bytes,
                 }
 
                 sk_mem_charge(sk, size);
+                r->sg_data[i] = md->sg_data[i];
                 r->sg_data[i].length = size;
                 md->sg_data[i].length -= size;
                 md->sg_data[i].offset += size;
@@ -575,10 +580,10 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
                                        struct sk_msg_buff *md,
                                        int flags)
 {
+        bool ingress = !!(md->flags & BPF_F_INGRESS);
         struct smap_psock *psock;
         struct scatterlist *sg;
-        int i, err, free = 0;
-        bool ingress = !!(md->flags & BPF_F_INGRESS);
+        int err = 0;
 
         sg = md->sg_data;
 
@@ -606,16 +611,8 @@ static int bpf_tcp_sendmsg_do_redirect(struct sock *sk, int send,
 out_rcu:
         rcu_read_unlock();
 out:
-        i = md->sg_start;
-        while (sg[i].length) {
-                free += sg[i].length;
-                put_page(sg_page(&sg[i]));
-                sg[i].length = 0;
-                i++;
-                if (i == MAX_SKB_FRAGS)
-                        i = 0;
-        }
-        return free;
+        free_bytes_sg(NULL, send, md, false);
+        return err;
 }
 
 static inline void bpf_md_init(struct smap_psock *psock)
@@ -700,19 +697,26 @@ more_data:
                 err = bpf_tcp_sendmsg_do_redirect(redir, send, m, flags);
                 lock_sock(sk);
 
+                if (unlikely(err < 0)) {
+                        free_start_sg(sk, m);
+                        psock->sg_size = 0;
+                        if (!cork)
+                                *copied -= send;
+                } else {
+                        psock->sg_size -= send;
+                }
+
                 if (cork) {
                         free_start_sg(sk, m);
+                        psock->sg_size = 0;
                         kfree(m);
                         m = NULL;
+                        err = 0;
                 }
-                if (unlikely(err))
-                        *copied -= err;
-                else
-                        psock->sg_size -= send;
                 break;
         case __SK_DROP:
         default:
-                free_bytes_sg(sk, send, m);
+                free_bytes_sg(sk, send, m, true);
                 apply_bytes_dec(psock, send);
                 *copied -= send;
                 psock->sg_size -= send;
@@ -732,6 +736,26 @@ out_err:
         return err;
 }
 
+static int bpf_wait_data(struct sock *sk,
+                         struct smap_psock *psk, int flags,
+                         long timeo, int *err)
+{
+        int rc;
+
+        DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+        add_wait_queue(sk_sleep(sk), &wait);
+        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+        rc = sk_wait_event(sk, &timeo,
+                           !list_empty(&psk->ingress) ||
+                           !skb_queue_empty(&sk->sk_receive_queue),
+                           &wait);
+        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+        remove_wait_queue(sk_sleep(sk), &wait);
+
+        return rc;
+}
+
 static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                            int nonblock, int flags, int *addr_len)
 {
@@ -755,6 +779,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                 return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
 
         lock_sock(sk);
+bytes_ready:
         while (copied != len) {
                 struct scatterlist *sg;
                 struct sk_msg_buff *md;
@@ -809,6 +834,28 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                 }
         }
 
+        if (!copied) {
+                long timeo;
+                int data;
+                int err = 0;
+
+                timeo = sock_rcvtimeo(sk, nonblock);
+                data = bpf_wait_data(sk, psock, flags, timeo, &err);
+
+                if (data) {
+                        if (!skb_queue_empty(&sk->sk_receive_queue)) {
+                                release_sock(sk);
+                                smap_release_sock(psock, sk);
+                                copied = tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
+                                return copied;
+                        }
+                        goto bytes_ready;
+                }
+
+                if (err)
+                        copied = err;
+        }
+
         release_sock(sk);
         smap_release_sock(psock, sk);
         return copied;
@@ -1831,7 +1878,7 @@ static int sock_map_update_elem(struct bpf_map *map,
         return err;
 }
 
-static void sock_map_release(struct bpf_map *map, struct file *map_file)
+static void sock_map_release(struct bpf_map *map)
 {
         struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
         struct bpf_prog *orig;
@@ -1855,7 +1902,7 @@ const struct bpf_map_ops sock_map_ops = {
         .map_get_next_key = sock_map_get_next_key,
         .map_update_elem = sock_map_update_elem,
         .map_delete_elem = sock_map_delete_elem,
-        .map_release = sock_map_release,
+        .map_release_uref = sock_map_release,
 };
 
 BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
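The recvmsg change above is the behavioral core of this file's fixes: instead of returning immediately with 0 when nothing is queued, `bpf_tcp_recvmsg()` now parks in `bpf_wait_data()` until either the psock ingress list or the socket's receive queue fills, then jumps back to `bytes_ready` and retries the copy loop. As a rough userspace analog of that block-until-ready shape (hypothetical names, pthreads standing in for kernel wait queues and `sk_wait_event()`):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct waitable_queue {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int depth;              /* number of queued items */
};

/* Block until the queue has data or the timeout expires. */
static bool wait_for_data(struct waitable_queue *q, long timeout_ms)
{
        struct timespec deadline;
        bool ready;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_ms / 1000;
        deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&q->lock);
        while (q->depth == 0) {
                /* Producers signal q->cond after incrementing q->depth. */
                if (pthread_cond_timedwait(&q->cond, &q->lock, &deadline))
                        break;  /* timed out (or error): stop waiting */
        }
        ready = q->depth > 0;
        pthread_mutex_unlock(&q->lock);
        return ready;
}

int main(void)
{
        struct waitable_queue q = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };

        printf("data ready: %d\n", wait_for_data(&q, 100));  /* -> 0 */
        return 0;
}
```

The kernel version additionally falls back to plain `tcp_recvmsg()` when the wakeup came from data on the unparsed receive queue, as the hunk above shows.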
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4ca46df19c9a..ebfe9f29dae8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -257,8 +257,8 @@ static void bpf_map_free_deferred(struct work_struct *work)
 static void bpf_map_put_uref(struct bpf_map *map)
 {
         if (atomic_dec_and_test(&map->usercnt)) {
-                if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
-                        bpf_fd_array_map_clear(map);
+                if (map->ops->map_release_uref)
+                        map->ops->map_release_uref(map);
         }
 }
 
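Together with the `arraymap.c` hunk above, this converts a hard-coded `BPF_MAP_TYPE_PROG_ARRAY` check into an optional per-type callback: any map type that needs cleanup when the last user reference goes away now supplies `map_release_uref` in its ops table, and sockmap hooks `sock_map_release()` into the same slot. A minimal self-contained sketch of the pattern (simplified names, C11 atomics in place of the kernel's refcount helpers):

```c
#include <stdatomic.h>
#include <stdio.h>

struct map;

struct map_ops {
        void (*map_release_uref)(struct map *map);  /* optional hook */
};

struct map {
        const struct map_ops *ops;
        atomic_int usercnt;
};

static void map_put_uref(struct map *map)
{
        /* Last user reference gone: let the type clean up, if it cares. */
        if (atomic_fetch_sub(&map->usercnt, 1) == 1) {
                if (map->ops->map_release_uref)
                        map->ops->map_release_uref(map);
        }
}

static void sock_map_release(struct map *map)
{
        printf("dropping sockets held by map %p\n", (void *)map);
}

static const struct map_ops sock_map_ops = {
        .map_release_uref = sock_map_release,
};

int main(void)
{
        struct map m = { .ops = &sock_map_ops };

        atomic_init(&m.usercnt, 1);
        map_put_uref(&m);       /* invokes sock_map_release() */
        return 0;
}
```

The design point is that the core refcounting code no longer needs to know which map types exist; types opt in by filling the callback.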
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ce6848e46e94..1725b902983f 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -491,7 +491,7 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
         if (!uprobe)
                 return NULL;
 
-        uprobe->inode = igrab(inode);
+        uprobe->inode = inode;
         uprobe->offset = offset;
         init_rwsem(&uprobe->register_rwsem);
         init_rwsem(&uprobe->consumer_rwsem);
@@ -502,7 +502,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
         if (cur_uprobe) {
                 kfree(uprobe);
                 uprobe = cur_uprobe;
-                iput(inode);
         }
 
         return uprobe;
@@ -701,7 +700,6 @@ static void delete_uprobe(struct uprobe *uprobe)
         rb_erase(&uprobe->rb_node, &uprobes_tree);
         spin_unlock(&uprobes_treelock);
         RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
-        iput(uprobe->inode);
         put_uprobe(uprobe);
 }
 
@@ -873,7 +871,8 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u
  * tuple). Creation refcount stops uprobe_unregister from freeing the
  * @uprobe even before the register operation is complete. Creation
  * refcount is released when the last @uc for the @uprobe
- * unregisters.
+ * unregisters. Caller of uprobe_register() is required to keep @inode
+ * (and the containing mount) referenced.
  *
  * Return errno if it cannot successully install probes
  * else return 0 (success)
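The net effect of these hunks is an ownership transfer: the uprobe no longer pins the inode itself with `igrab()`/`iput()`; the updated kernel-doc makes it the `uprobe_register()` caller's job to keep the inode (and its mount) referenced while the probe exists. Sketched as a generic borrowed-reference contract (all names hypothetical, not the kernel API):

```c
#include <stddef.h>

struct resource { int data; };

struct probe {
        struct resource *res;   /* borrowed; never freed by the probe */
};

/* Caller contract: 'res' stays valid until probe_unregister(). */
static void probe_register(struct probe *p, struct resource *res)
{
        p->res = res;           /* previously: res = grab_ref(res); */
}

static void probe_unregister(struct probe *p)
{
        p->res = NULL;          /* previously: drop_ref(p->res); */
}
```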
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 0e974cface0b..84f37420fcf5 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -119,6 +119,16 @@ static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
 
+static void inline clocksource_watchdog_lock(unsigned long *flags)
+{
+        spin_lock_irqsave(&watchdog_lock, *flags);
+}
+
+static void inline clocksource_watchdog_unlock(unsigned long *flags)
+{
+        spin_unlock_irqrestore(&watchdog_lock, *flags);
+}
+
 static int clocksource_watchdog_kthread(void *data);
 static void __clocksource_change_rating(struct clocksource *cs, int rating);
 
@@ -142,9 +152,19 @@ static void __clocksource_unstable(struct clocksource *cs)
         cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
         cs->flags |= CLOCK_SOURCE_UNSTABLE;
 
+        /*
+         * If the clocksource is registered clocksource_watchdog_kthread() will
+         * re-rate and re-select.
+         */
+        if (list_empty(&cs->list)) {
+                cs->rating = 0;
+                return;
+        }
+
         if (cs->mark_unstable)
                 cs->mark_unstable(cs);
 
+        /* kick clocksource_watchdog_kthread() */
         if (finished_booting)
                 schedule_work(&watchdog_work);
 }
@@ -153,10 +173,8 @@ static void __clocksource_unstable(struct clocksource *cs)
  * clocksource_mark_unstable - mark clocksource unstable via watchdog
  * @cs: clocksource to be marked unstable
  *
- * This function is called instead of clocksource_change_rating from
- * cpu hotplug code to avoid a deadlock between the clocksource mutex
- * and the cpu hotplug mutex. It defers the update of the clocksource
- * to the watchdog thread.
+ * This function is called by the x86 TSC code to mark clocksources as unstable;
+ * it defers demotion and re-selection to a kthread.
  */
 void clocksource_mark_unstable(struct clocksource *cs)
 {
@@ -164,7 +182,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 
         spin_lock_irqsave(&watchdog_lock, flags);
         if (!(cs->flags & CLOCK_SOURCE_UNSTABLE)) {
-                if (list_empty(&cs->wd_list))
+                if (!list_empty(&cs->list) && list_empty(&cs->wd_list))
                         list_add(&cs->wd_list, &watchdog_list);
                 __clocksource_unstable(cs);
         }
@@ -319,9 +337,8 @@ static void clocksource_resume_watchdog(void)
 
 static void clocksource_enqueue_watchdog(struct clocksource *cs)
 {
-        unsigned long flags;
+        INIT_LIST_HEAD(&cs->wd_list);
 
-        spin_lock_irqsave(&watchdog_lock, flags);
         if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                 /* cs is a clocksource to be watched. */
                 list_add(&cs->wd_list, &watchdog_list);
@@ -331,7 +348,6 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
                 if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS)
                         cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES;
         }
-        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static void clocksource_select_watchdog(bool fallback)
@@ -373,9 +389,6 @@ static void clocksource_select_watchdog(bool fallback)
 
 static void clocksource_dequeue_watchdog(struct clocksource *cs)
 {
-        unsigned long flags;
-
-        spin_lock_irqsave(&watchdog_lock, flags);
         if (cs != watchdog) {
                 if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) {
                         /* cs is a watched clocksource. */
@@ -384,21 +397,19 @@ static void clocksource_dequeue_watchdog(struct clocksource *cs)
                         clocksource_stop_watchdog();
                 }
         }
-        spin_unlock_irqrestore(&watchdog_lock, flags);
 }
 
 static int __clocksource_watchdog_kthread(void)
 {
         struct clocksource *cs, *tmp;
         unsigned long flags;
-        LIST_HEAD(unstable);
         int select = 0;
 
         spin_lock_irqsave(&watchdog_lock, flags);
         list_for_each_entry_safe(cs, tmp, &watchdog_list, wd_list) {
                 if (cs->flags & CLOCK_SOURCE_UNSTABLE) {
                         list_del_init(&cs->wd_list);
-                        list_add(&cs->wd_list, &unstable);
+                        __clocksource_change_rating(cs, 0);
                         select = 1;
                 }
                 if (cs->flags & CLOCK_SOURCE_RESELECT) {
@@ -410,11 +421,6 @@ static int __clocksource_watchdog_kthread(void)
         clocksource_stop_watchdog();
         spin_unlock_irqrestore(&watchdog_lock, flags);
 
-        /* Needs to be done outside of watchdog lock */
-        list_for_each_entry_safe(cs, tmp, &unstable, wd_list) {
-                list_del_init(&cs->wd_list);
-                __clocksource_change_rating(cs, 0);
-        }
         return select;
 }
 
@@ -447,6 +453,9 @@ static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
 void clocksource_mark_unstable(struct clocksource *cs) { }
 
+static void inline clocksource_watchdog_lock(unsigned long *flags) { }
+static void inline clocksource_watchdog_unlock(unsigned long *flags) { }
+
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
 /**
@@ -779,14 +788,19 @@ EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
  */
 int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
+        unsigned long flags;
 
         /* Initialize mult/shift and max_idle_ns */
         __clocksource_update_freq_scale(cs, scale, freq);
 
         /* Add clocksource to the clocksource list */
         mutex_lock(&clocksource_mutex);
+
+        clocksource_watchdog_lock(&flags);
         clocksource_enqueue(cs);
         clocksource_enqueue_watchdog(cs);
+        clocksource_watchdog_unlock(&flags);
+
         clocksource_select();
         clocksource_select_watchdog(false);
         mutex_unlock(&clocksource_mutex);
@@ -808,8 +822,13 @@ static void __clocksource_change_rating(struct clocksource *cs, int rating)
  */
 void clocksource_change_rating(struct clocksource *cs, int rating)
 {
+        unsigned long flags;
+
         mutex_lock(&clocksource_mutex);
+        clocksource_watchdog_lock(&flags);
         __clocksource_change_rating(cs, rating);
+        clocksource_watchdog_unlock(&flags);
+
         clocksource_select();
         clocksource_select_watchdog(false);
         mutex_unlock(&clocksource_mutex);
@@ -821,6 +840,8 @@ EXPORT_SYMBOL(clocksource_change_rating);
  */
 static int clocksource_unbind(struct clocksource *cs)
 {
+        unsigned long flags;
+
         if (clocksource_is_watchdog(cs)) {
                 /* Select and try to install a replacement watchdog. */
                 clocksource_select_watchdog(true);
@@ -834,8 +855,12 @@ static int clocksource_unbind(struct clocksource *cs)
                 if (curr_clocksource == cs)
                         return -EBUSY;
         }
+
+        clocksource_watchdog_lock(&flags);
         clocksource_dequeue_watchdog(cs);
         list_del_init(&cs->list);
+        clocksource_watchdog_unlock(&flags);
+
         return 0;
 }
 
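The recurring shape in this file is the `clocksource_watchdog_lock()`/`_unlock()` helper pair: callers take the watchdog lock around list manipulation without caring whether the watchdog is built in, because the `!CONFIG_CLOCKSOURCE_WATCHDOG` stubs compile to nothing. A minimal sketch of that conditional-lock-helper pattern (a userspace mutex standing in for the spinlock, `FEATURE_WATCHDOG` standing in for the config option):

```c
#include <pthread.h>

#ifdef FEATURE_WATCHDOG
static pthread_mutex_t watchdog_lock = PTHREAD_MUTEX_INITIALIZER;

static inline void watchdog_lock_acquire(void)
{
        pthread_mutex_lock(&watchdog_lock);
}

static inline void watchdog_lock_release(void)
{
        pthread_mutex_unlock(&watchdog_lock);
}
#else
/* Feature disabled: the helpers compile away to nothing. */
static inline void watchdog_lock_acquire(void) { }
static inline void watchdog_lock_release(void) { }
#endif

/* Callers look identical in both configurations. */
void change_rating_example(void)
{
        watchdog_lock_acquire();
        /* ... re-rate the clocksource while list membership is stable ... */
        watchdog_lock_release();
}
```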
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 16bbf062018f..8d83bcf9ef69 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -5514,10 +5514,10 @@ static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
         ftrace_create_filter_files(&global_ops, d_tracer);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        trace_create_file("set_graph_function", 0444, d_tracer,
+        trace_create_file("set_graph_function", 0644, d_tracer,
                             NULL,
                             &ftrace_graph_fops);
-        trace_create_file("set_graph_notrace", 0444, d_tracer,
+        trace_create_file("set_graph_notrace", 0644, d_tracer,
                             NULL,
                             &ftrace_graph_notrace_fops);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
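For context on the mode change: these tracefs files accept writes (e.g. `echo vfs_read > set_graph_function` to filter the graph tracer), so creating them as 0444 advertised a writable interface as read-only; 0644 restores the owner write bit while keeping them world-readable. `stack_trace_filter` below gets the same fix.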
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 0d7b3ffbecc2..b9061ed59bbd 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2466,6 +2466,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
                 else if (strcmp(modifier, "usecs") == 0)
                         *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
                 else {
+                        hist_err("Invalid field modifier: ", modifier);
                         field = ERR_PTR(-EINVAL);
                         goto out;
                 }
@@ -2481,6 +2482,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
         else {
                 field = trace_find_event_field(file->event_call, field_name);
                 if (!field || !field->size) {
+                        hist_err("Couldn't find field: ", field_name);
                         field = ERR_PTR(-EINVAL);
                         goto out;
                 }
@@ -4913,6 +4915,16 @@ static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
                 seq_printf(m, "%s", field_name);
         } else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
                 seq_puts(m, "common_timestamp");
+
+        if (hist_field->flags) {
+                if (!(hist_field->flags & HIST_FIELD_FL_VAR_REF) &&
+                    !(hist_field->flags & HIST_FIELD_FL_EXPR)) {
+                        const char *flags = get_hist_field_flags(hist_field);
+
+                        if (flags)
+                                seq_printf(m, ".%s", flags);
+                }
+        }
 }
 
 static int event_hist_trigger_print(struct seq_file *m,
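The two `hist_err()` calls attach a human-readable explanation to 'hist' trigger parse failures instead of returning only a bare `-EINVAL`; the message is recorded and surfaced when the user later reads the trigger's `hist` file. A minimal sketch of that record-the-last-error idea (hypothetical buffer handling; the real one lives in the tracing subsystem and is cleared per trigger):

```c
#include <stdio.h>

static char last_hist_err[256];

static void hist_err_sketch(const char *str, const char *var)
{
        /* Keep only the first error so follow-on failures don't clobber it. */
        if (last_hist_err[0])
                return;
        snprintf(last_hist_err, sizeof(last_hist_err), "%s%s", str, var);
}

int main(void)
{
        hist_err_sketch("Couldn't find field: ", "common_pidd");
        puts(last_hist_err);    /* -> Couldn't find field: common_pidd */
        return 0;
}
```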
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 3c7bfc4bf5e9..4237eba4ef20 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -472,7 +472,7 @@ static __init int stack_trace_init(void)
                         NULL, &stack_trace_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-        trace_create_file("stack_trace_filter", 0444, d_tracer,
+        trace_create_file("stack_trace_filter", 0644, d_tracer,
                           &trace_ops, &stack_trace_filter_fops);
 #endif
 
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 34fd0e0ec51d..ac892878dbe6 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -55,6 +55,7 @@ struct trace_uprobe {
         struct list_head list;
         struct trace_uprobe_filter filter;
         struct uprobe_consumer consumer;
+        struct path path;
         struct inode *inode;
         char *filename;
         unsigned long offset;
@@ -289,7 +290,7 @@ static void free_trace_uprobe(struct trace_uprobe *tu)
         for (i = 0; i < tu->tp.nr_args; i++)
                 traceprobe_free_probe_arg(&tu->tp.args[i]);
 
-        iput(tu->inode);
+        path_put(&tu->path);
         kfree(tu->tp.call.class->system);
         kfree(tu->tp.call.name);
         kfree(tu->filename);
@@ -363,7 +364,6 @@ end:
 static int create_trace_uprobe(int argc, char **argv)
 {
         struct trace_uprobe *tu;
-        struct inode *inode;
         char *arg, *event, *group, *filename;
         char buf[MAX_EVENT_NAME_LEN];
         struct path path;
@@ -371,7 +371,6 @@ static int create_trace_uprobe(int argc, char **argv)
         bool is_delete, is_return;
         int i, ret;
 
-        inode = NULL;
         ret = 0;
         is_delete = false;
         is_return = false;
@@ -437,21 +436,16 @@ static int create_trace_uprobe(int argc, char **argv)
         }
         /* Find the last occurrence, in case the path contains ':' too. */
         arg = strrchr(argv[1], ':');
-        if (!arg) {
-                ret = -EINVAL;
-                goto fail_address_parse;
-        }
+        if (!arg)
+                return -EINVAL;
 
         *arg++ = '\0';
         filename = argv[1];
         ret = kern_path(filename, LOOKUP_FOLLOW, &path);
         if (ret)
-                goto fail_address_parse;
-
-        inode = igrab(d_real_inode(path.dentry));
-        path_put(&path);
+                return ret;
 
-        if (!inode || !S_ISREG(inode->i_mode)) {
+        if (!d_is_reg(path.dentry)) {
                 ret = -EINVAL;
                 goto fail_address_parse;
         }
@@ -490,7 +484,7 @@ static int create_trace_uprobe(int argc, char **argv)
                 goto fail_address_parse;
         }
         tu->offset = offset;
-        tu->inode = inode;
+        tu->path = path;
         tu->filename = kstrdup(filename, GFP_KERNEL);
 
         if (!tu->filename) {
@@ -558,7 +552,7 @@ error:
         return ret;
 
 fail_address_parse:
-        iput(inode);
+        path_put(&path);
 
         pr_info("Failed to parse address or file.\n");
 
@@ -922,6 +916,7 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
                 goto err_flags;
 
         tu->consumer.filter = filter;
+        tu->inode = d_real_inode(tu->path.dentry);
         ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
         if (ret)
                 goto err_buffer;
@@ -967,6 +962,7 @@ probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
         WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
         uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
+        tu->inode = NULL;
         tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
 
         uprobe_buffer_disable();
@@ -1337,7 +1333,6 @@ struct trace_event_call *
 create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
 {
         struct trace_uprobe *tu;
-        struct inode *inode;
         struct path path;
         int ret;
 
@@ -1345,11 +1340,8 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
         if (ret)
                 return ERR_PTR(ret);
 
-        inode = igrab(d_inode(path.dentry));
-        path_put(&path);
-
-        if (!inode || !S_ISREG(inode->i_mode)) {
-                iput(inode);
+        if (!d_is_reg(path.dentry)) {
+                path_put(&path);
                 return ERR_PTR(-EINVAL);
         }
 
@@ -1364,11 +1356,12 @@ create_local_trace_uprobe(char *name, unsigned long offs, bool is_return)
         if (IS_ERR(tu)) {
                 pr_info("Failed to allocate trace_uprobe.(%d)\n",
                         (int)PTR_ERR(tu));
+                path_put(&path);
                 return ERR_CAST(tu);
         }
 
         tu->offset = offs;
-        tu->inode = inode;
+        tu->path = path;
         tu->filename = kstrdup(name, GFP_KERNEL);
         init_trace_event_call(tu, &tu->tp.call);
 
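This is the caller-side half of the `uprobes.c` change above: rather than grabbing the inode once with `igrab()` and immediately dropping the path, the trace_uprobe now holds the `struct path` (which pins the dentry, inode, and mount) for its whole lifetime, and resolves `tu->inode` via `d_real_inode()` only while the probe is actually registered. Holding a bare `igrab()`'d inode was fragile because it keeps neither the mount pinned nor, on some filesystems, the inode meaningful once the path is dropped.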
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 671b13457387..1e37da2e0c25 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -207,7 +207,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
                         lockdep_is_held(&tracepoints_mutex));
         old = func_add(&tp_funcs, func, prio);
         if (IS_ERR(old)) {
-                WARN_ON_ONCE(1);
+                WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
                 return PTR_ERR(old);
         }
 
@@ -239,7 +239,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
                         lockdep_is_held(&tracepoints_mutex));
         old = func_remove(&tp_funcs, func);
         if (IS_ERR(old)) {
-                WARN_ON_ONCE(1);
+                WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
                 return PTR_ERR(old);
         }
 
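`func_add()`/`func_remove()` return error pointers, and after this change the `WARN_ON_ONCE()` fires only for codes other than `-ENOMEM`: allocation failure is an expected runtime condition, while anything else (such as a duplicate registration) indicates a caller bug worth a backtrace. A userspace sketch of the error-pointer-plus-selective-warning idiom (stand-in macros for the kernel's `ERR_PTR()`/`PTR_ERR()`/`IS_ERR()`):

```c
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers: encode small negative
 * error codes in the top 4095 bytes of the address space. */
#define ERR_PTR(err)  ((void *)(intptr_t)(err))
#define PTR_ERR(ptr)  ((long)(intptr_t)(ptr))
#define IS_ERR(ptr)   ((uintptr_t)(ptr) >= (uintptr_t)-4095)

static void *func_add_sketch(int fail_with)
{
        return fail_with ? ERR_PTR(fail_with) : "ok";
}

int main(void)
{
        void *old = func_add_sketch(-ENOMEM);

        if (IS_ERR(old)) {
                /* Unexpected errors get a loud warning; -ENOMEM stays quiet. */
                if (PTR_ERR(old) != -ENOMEM)
                        fprintf(stderr, "WARN: unexpected error %ld\n",
                                PTR_ERR(old));
                return (int)-PTR_ERR(old);
        }
        return 0;
}
```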
