author    Chunwei Chen <tuxoko@gmail.com>    2014-04-23 00:35:09 -0400
committer Ilya Dryomov <ilya.dryomov@inktank.com>    2014-05-16 13:29:26 -0400
commit    178eda29ca721842f2146378e73d43e0044c4166 (patch)
tree      0a4e1518e04a719ca1c798c0c4a90a6bbe0f4bd9 /net/ceph
parent    d6d211db37e75de2ddc3a4f979038c40df7cc79c (diff)
libceph: fix corruption when using page_count 0 page in rbd
It has been reported that using ZFSonLinux on rbd results in memory corruption. The bug reports can be found here:

https://github.com/zfsonlinux/spl/issues/241
http://tracker.ceph.com/issues/7790

The reason is that ZFS sends pages with page_count 0 into rbd, which in turn passes them to tcp_sendpage. However, tcp_sendpage cannot deal with page_count 0: it does a get_page and put_page on the page and thereby erroneously frees it. This type of issue has been noted before, and handled in iscsi, drbd, etc., so rbd should handle it as well.

This fix addresses the issue by falling back to the slower sendmsg path when a page with page_count 0 is detected.

Cc: Sage Weil <sage@inktank.com>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: stable@vger.kernel.org
Signed-off-by: Chunwei Chen <tuxoko@gmail.com>
Reviewed-by: Ilya Dryomov <ilya.dryomov@inktank.com>
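To see why a page_count of 0 is fatal on the sendpage path, consider the reference-count sequence the message above describes: the sendpage path takes a reference (get_page) and later drops it (put_page); if the page arrived with page_count 0, that put_page brings the count back to zero and the page is freed while its real owner still uses it. Below is a minimal, hypothetical userspace sketch of that sequence; fake_page, fake_get_page and fake_put_page are illustrative stand-ins, not the real struct page / get_page / put_page kernel APIs.

#include <stdio.h>

/* Illustrative stand-in for struct page; only the refcount matters here. */
struct fake_page {
	int count;			/* models page_count(page) */
};

static void fake_get_page(struct fake_page *p)
{
	p->count++;			/* the sendpage path takes a reference */
}

static void fake_put_page(struct fake_page *p)
{
	if (--p->count == 0)		/* last reference gone: page gets freed */
		printf("page freed while its owner still uses it -> corruption\n");
}

int main(void)
{
	struct fake_page normal   = { .count = 1 };	/* ordinary page */
	struct fake_page zfs_page = { .count = 0 };	/* page as submitted by ZFS */

	/* Ordinary page: get/put leaves the count at 1, the owner keeps its page. */
	fake_get_page(&normal);
	fake_put_page(&normal);

	/* page_count 0 page: get/put drops the count back to 0, freeing the page. */
	fake_get_page(&zfs_page);
	fake_put_page(&zfs_page);

	return 0;
}

The patch below avoids this by checking page_count() first and, for zero-count pages, copying the data through kmap() and ceph_tcp_sendmsg() instead of handing the page to sendpage.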
Diffstat (limited to 'net/ceph')
-rw-r--r--  net/ceph/messenger.c  20
1 file changed, 19 insertions, 1 deletion
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dac7f9b98687..1948d592aa54 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -557,7 +557,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
 	return r;
 }
 
-static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
 		     int offset, size_t size, bool more)
 {
 	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
@@ -570,6 +570,24 @@ static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
 	return ret;
 }
 
+static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
+		     int offset, size_t size, bool more)
+{
+	int ret;
+	struct kvec iov;
+
+	/* sendpage cannot properly handle pages with page_count == 0,
+	 * we need to fallback to sendmsg if that's the case */
+	if (page_count(page) >= 1)
+		return __ceph_tcp_sendpage(sock, page, offset, size, more);
+
+	iov.iov_base = kmap(page) + offset;
+	iov.iov_len = size;
+	ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
+	kunmap(page);
+
+	return ret;
+}
 
 /*
  * Shutdown/close the socket for the given connection.