author		David Miller <davem@davemloft.net>	2014-12-16 17:58:17 -0500
committer	David S. Miller <davem@davemloft.net>	2014-12-18 12:35:23 -0500
commit		4682a0358639b29cf69437ed909c6221f8c89847 (patch)
tree		85469362ce075e5d59ebd3559f5e590749432e24 /net/netlink/af_netlink.c
parent		65891feac27e26115dc4cce881743a1ac33372df (diff)
netlink: Always copy on mmap TX.
Checking the file f_count and the nlk->mapped count is not completely
sufficient to prevent the mmap'd area contents from changing from
under us during netlink mmap sendmsg() operations.

Be careful to sample the header's length field only once, because this
could change from under us as well.

Fixes: 5fd96123ee19 ("netlink: implement memory mapped sendmsg()")
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Daniel Borkmann <dborkman@redhat.com>
Acked-by: Thomas Graf <tgraf@suug.ch>
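The bug class here is a time-of-check-to-time-of-use (TOCTOU) race on memory shared with userspace: hdr->nm_len lives in the mmap'd TX ring, so a value loaded from it can be rewritten by another thread between the bounds check and the later memcpy(). A minimal userspace sketch of the pattern and of the single-sample fix (the struct and function names below are illustrative, not from the kernel source; volatile stands in for the kernel's ACCESS_ONCE()):

#include <stddef.h>
#include <string.h>

/* Illustrative only: models a ring frame whose header lives in memory
 * that an untrusted peer can rewrite at any time.
 */
struct frame_hdr {
	unsigned int nm_len;	/* peer-controlled, may change under us */
	char data[];
};

/* BROKEN: nm_len is loaded twice.  The peer can grow it between the
 * bounds check and the memcpy(), overflowing dst.
 */
static int copy_frame_racy(char *dst, size_t maxlen,
			   volatile struct frame_hdr *hdr)
{
	if (hdr->nm_len > maxlen)			/* first load: checked */
		return -1;
	memcpy(dst, (void *)hdr->data, hdr->nm_len);	/* second load: unchecked */
	return 0;
}

/* FIXED: sample the shared field exactly once, as the patch does with
 * ACCESS_ONCE(hdr->nm_len), then check and use the same local value.
 * The payload may still be torn by a concurrent writer, but once we
 * always copy, that is harmless: the receiver validates the copy, not
 * the shared mapping.
 */
static int copy_frame_once(char *dst, size_t maxlen,
			   volatile struct frame_hdr *hdr)
{
	unsigned int nm_len = hdr->nm_len;		/* single load */

	if (nm_len > maxlen)
		return -1;
	memcpy(dst, (void *)hdr->data, nm_len);
	return 0;
}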
Diffstat (limited to 'net/netlink/af_netlink.c')
-rw-r--r--	net/netlink/af_netlink.c	52
1 file changed, 16 insertions(+), 36 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ef5f77b44ec7..a64680a3e782 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -525,14 +525,14 @@ out:
 	return err;
 }
 
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
 {
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 	struct page *p_start, *p_end;
 
 	/* First page is flushed through netlink_{get,set}_status */
 	p_start = pgvec_to_page(hdr + PAGE_SIZE);
-	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+	p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
 	while (p_start <= p_end) {
 		flush_dcache_page(p_start);
 		p_start++;
@@ -714,24 +714,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 	struct nl_mmap_hdr *hdr;
 	struct sk_buff *skb;
 	unsigned int maxlen;
-	bool excl = true;
 	int err = 0, len = 0;
 
-	/* Netlink messages are validated by the receiver before processing.
-	 * In order to avoid userspace changing the contents of the message
-	 * after validation, the socket and the ring may only be used by a
-	 * single process, otherwise we fall back to copying.
-	 */
-	if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
-	    atomic_read(&nlk->mapped) > 1)
-		excl = false;
-
 	mutex_lock(&nlk->pg_vec_lock);
 
 	ring   = &nlk->tx_ring;
 	maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 
 	do {
+		unsigned int nm_len;
+
 		hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
 		if (hdr == NULL) {
 			if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -739,35 +731,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
 			schedule();
 			continue;
 		}
-		if (hdr->nm_len > maxlen) {
+
+		nm_len = ACCESS_ONCE(hdr->nm_len);
+		if (nm_len > maxlen) {
 			err = -EINVAL;
 			goto out;
 		}
 
-		netlink_frame_flush_dcache(hdr);
+		netlink_frame_flush_dcache(hdr, nm_len);
 
-		if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
-			skb = alloc_skb_head(GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			sock_hold(sk);
-			netlink_ring_setup_skb(skb, sk, ring, hdr);
-			NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
-			__skb_put(skb, hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
-			atomic_inc(&ring->pending);
-		} else {
-			skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
-			if (skb == NULL) {
-				err = -ENOBUFS;
-				goto out;
-			}
-			__skb_put(skb, hdr->nm_len);
-			memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
-			netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
-		}
+		skb = alloc_skb(nm_len, GFP_KERNEL);
+		if (skb == NULL) {
+			err = -ENOBUFS;
+			goto out;
+		}
+		__skb_put(skb, nm_len);
+		memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+		netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
 
 		netlink_increment_head(ring);
 
@@ -813,7 +793,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
 	hdr->nm_pid	= NETLINK_CB(skb).creds.pid;
 	hdr->nm_uid	= from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
 	hdr->nm_gid	= from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
-	netlink_frame_flush_dcache(hdr);
+	netlink_frame_flush_dcache(hdr, hdr->nm_len);
 	netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
 
 	NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
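
For reference, ACCESS_ONCE() in kernels of this era was defined in include/linux/compiler.h as essentially a volatile cast, which forces the compiler to emit exactly one load (or store) and never re-fetch the location; later kernels replaced it with READ_ONCE()/WRITE_ONCE():

/* include/linux/compiler.h (kernels of this era) */
#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

After the patch, every subsequent use in the sendmsg loop refers to the local nm_len, so the length that was bounds-checked and the length passed to alloc_skb(), __skb_put(), and memcpy() cannot diverge.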