Diffstat (limited to 'fs')
-rw-r--r--  fs/ceph/Kconfig | 14
-rw-r--r--  fs/ceph/Makefile | 11
-rw-r--r--  fs/ceph/README | 20
-rw-r--r--  fs/ceph/addr.c | 65
-rw-r--r--  fs/ceph/armor.c | 103
-rw-r--r--  fs/ceph/auth.c | 259
-rw-r--r--  fs/ceph/auth.h | 92
-rw-r--r--  fs/ceph/auth_none.c | 131
-rw-r--r--  fs/ceph/auth_none.h | 30
-rw-r--r--  fs/ceph/auth_x.c | 687
-rw-r--r--  fs/ceph/auth_x.h | 49
-rw-r--r--  fs/ceph/auth_x_protocol.h | 90
-rw-r--r--  fs/ceph/buffer.c | 65
-rw-r--r--  fs/ceph/buffer.h | 39
-rw-r--r--  fs/ceph/caps.c | 35
-rw-r--r--  fs/ceph/ceph_debug.h | 37
-rw-r--r--  fs/ceph/ceph_frag.c | 3
-rw-r--r--  fs/ceph/ceph_frag.h | 109
-rw-r--r--  fs/ceph/ceph_fs.c | 72
-rw-r--r--  fs/ceph/ceph_fs.h | 728
-rw-r--r--  fs/ceph/ceph_hash.c | 118
-rw-r--r--  fs/ceph/ceph_hash.h | 13
-rw-r--r--  fs/ceph/crush/crush.c | 151
-rw-r--r--  fs/ceph/crush/crush.h | 180
-rw-r--r--  fs/ceph/crush/hash.c | 149
-rw-r--r--  fs/ceph/crush/hash.h | 17
-rw-r--r--  fs/ceph/crush/mapper.c | 609
-rw-r--r--  fs/ceph/crush/mapper.h | 20
-rw-r--r--  fs/ceph/crypto.c | 412
-rw-r--r--  fs/ceph/crypto.h | 48
-rw-r--r--  fs/ceph/debugfs.c | 407
-rw-r--r--  fs/ceph/decode.h | 201
-rw-r--r--  fs/ceph/dir.c | 55
-rw-r--r--  fs/ceph/export.c | 5
-rw-r--r--  fs/ceph/file.c | 207
-rw-r--r--  fs/ceph/inode.c | 19
-rw-r--r--  fs/ceph/ioctl.c | 11
-rw-r--r--  fs/ceph/locks.c | 6
-rw-r--r--  fs/ceph/mds_client.c | 86
-rw-r--r--  fs/ceph/mds_client.h | 20
-rw-r--r--  fs/ceph/mdsmap.c | 11
-rw-r--r--  fs/ceph/mdsmap.h | 62
-rw-r--r--  fs/ceph/messenger.c | 2432
-rw-r--r--  fs/ceph/messenger.h | 257
-rw-r--r--  fs/ceph/mon_client.c | 1018
-rw-r--r--  fs/ceph/mon_client.h | 121
-rw-r--r--  fs/ceph/msgpool.c | 64
-rw-r--r--  fs/ceph/msgpool.h | 25
-rw-r--r--  fs/ceph/msgr.h | 175
-rw-r--r--  fs/ceph/osd_client.c | 1771
-rw-r--r--  fs/ceph/osd_client.h | 233
-rw-r--r--  fs/ceph/osdmap.c | 1123
-rw-r--r--  fs/ceph/osdmap.h | 130
-rw-r--r--  fs/ceph/pagelist.c | 63
-rw-r--r--  fs/ceph/pagelist.h | 54
-rw-r--r--  fs/ceph/rados.h | 405
-rw-r--r--  fs/ceph/snap.c | 10
-rw-r--r--  fs/ceph/strings.c (renamed from fs/ceph/ceph_strings.c) | 82
-rw-r--r--  fs/ceph/super.c | 1154
-rw-r--r--  fs/ceph/super.h | 397
-rw-r--r--  fs/ceph/types.h | 29
-rw-r--r--  fs/ceph/xattr.c | 15
62 files changed, 926 insertions, 14078 deletions
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 0fcd2640c23f..9eb134ea6eb2 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -1,9 +1,11 @@
 config CEPH_FS
 	tristate "Ceph distributed file system (EXPERIMENTAL)"
 	depends on INET && EXPERIMENTAL
+	select CEPH_LIB
 	select LIBCRC32C
 	select CRYPTO_AES
 	select CRYPTO
+	default n
 	help
 	  Choose Y or M here to include support for mounting the
 	  experimental Ceph distributed file system.  Ceph is an extremely
@@ -14,15 +16,3 @@ config CEPH_FS
 
 	  If unsure, say N.
 
-config CEPH_FS_PRETTYDEBUG
-	bool "Include file:line in ceph debug output"
-	depends on CEPH_FS
-	default n
-	help
-	  If you say Y here, debug output will include a filename and
-	  line to aid debugging.  This icnreases kernel size and slows
-	  execution slightly when debug call sites are enabled (e.g.,
-	  via CONFIG_DYNAMIC_DEBUG).
-
-	  If unsure, say N.
-
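[Editor's note: the new "select CEPH_LIB" above points at the core library
this commit splits out into net/ceph.  Assuming the companion net/ceph/Kconfig
introduced by the same commit (not shown in this fs/-limited diff), the
crypto/CRC selects dropped here presumably move to an entry along these lines:

config CEPH_LIB
	tristate "Ceph core library (EXPERIMENTAL)"
	depends on INET && EXPERIMENTAL
	select LIBCRC32C
	select CRYPTO_AES
	select CRYPTO
	default n
]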
diff --git a/fs/ceph/Makefile b/fs/ceph/Makefile
index 278e1172600d..9e6c4f2e8ff1 100644
--- a/fs/ceph/Makefile
+++ b/fs/ceph/Makefile
@@ -8,15 +8,8 @@ obj-$(CONFIG_CEPH_FS) += ceph.o
 
 ceph-objs := super.o inode.o dir.o file.o locks.o addr.o ioctl.o \
 	export.o caps.o snap.o xattr.o \
-	messenger.o msgpool.o buffer.o pagelist.o \
-	mds_client.o mdsmap.o \
-	mon_client.o \
-	osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
-	debugfs.o \
-	auth.o auth_none.o \
-	crypto.o armor.o \
-	auth_x.o \
-	ceph_fs.o ceph_strings.o ceph_hash.o ceph_frag.o
+	mds_client.o mdsmap.o strings.o ceph_frag.o \
+	debugfs.o
 
 else
 #Otherwise we were called directly from the command
diff --git a/fs/ceph/README b/fs/ceph/README
deleted file mode 100644
index 18352fab37c0..000000000000
--- a/fs/ceph/README
+++ /dev/null
@@ -1,20 +0,0 @@
-#
-# The following files are shared by (and manually synchronized
-# between) the Ceph userland and kernel client.
-#
-# userland                   kernel
-src/include/ceph_fs.h        fs/ceph/ceph_fs.h
-src/include/ceph_fs.cc       fs/ceph/ceph_fs.c
-src/include/msgr.h           fs/ceph/msgr.h
-src/include/rados.h          fs/ceph/rados.h
-src/include/ceph_strings.cc  fs/ceph/ceph_strings.c
-src/include/ceph_frag.h      fs/ceph/ceph_frag.h
-src/include/ceph_frag.cc     fs/ceph/ceph_frag.c
-src/include/ceph_hash.h      fs/ceph/ceph_hash.h
-src/include/ceph_hash.cc     fs/ceph/ceph_hash.c
-src/crush/crush.c            fs/ceph/crush/crush.c
-src/crush/crush.h            fs/ceph/crush/crush.h
-src/crush/mapper.c           fs/ceph/crush/mapper.c
-src/crush/mapper.h           fs/ceph/crush/mapper.h
-src/crush/hash.h             fs/ceph/crush/hash.h
-src/crush/hash.c             fs/ceph/crush/hash.c
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index efbc604001c8..51bcc5ce3230 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/backing-dev.h>
 #include <linux/fs.h>
@@ -10,7 +10,8 @@
 #include <linux/task_io_accounting_ops.h>
 
 #include "super.h"
-#include "osd_client.h"
+#include "mds_client.h"
+#include <linux/ceph/osd_client.h>
 
 /*
  * Ceph address space ops.
@@ -193,7 +194,8 @@ static int readpage_nounlock(struct file *filp, struct page *page)
 {
 	struct inode *inode = filp->f_dentry->d_inode;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+	struct ceph_osd_client *osdc =
+		&ceph_inode_to_client(inode)->client->osdc;
 	int err = 0;
 	u64 len = PAGE_CACHE_SIZE;
 
@@ -265,7 +267,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping,
 {
 	struct inode *inode = file->f_dentry->d_inode;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_osd_client *osdc = &ceph_inode_to_client(inode)->osdc;
+	struct ceph_osd_client *osdc =
+		&ceph_inode_to_client(inode)->client->osdc;
 	int rc = 0;
 	struct page **pages;
 	loff_t offset;
@@ -365,7 +368,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 {
 	struct inode *inode;
 	struct ceph_inode_info *ci;
-	struct ceph_client *client;
+	struct ceph_fs_client *fsc;
 	struct ceph_osd_client *osdc;
 	loff_t page_off = page->index << PAGE_CACHE_SHIFT;
 	int len = PAGE_CACHE_SIZE;
@@ -383,8 +386,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	}
 	inode = page->mapping->host;
 	ci = ceph_inode(inode);
-	client = ceph_inode_to_client(inode);
-	osdc = &client->osdc;
+	fsc = ceph_inode_to_client(inode);
+	osdc = &fsc->client->osdc;
 
 	/* verify this is a writeable snap context */
 	snapc = (void *)page->private;
@@ -414,10 +417,10 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 	dout("writepage %p page %p index %lu on %llu~%u snapc %p\n",
 	     inode, page, page->index, page_off, len, snapc);
 
-	writeback_stat = atomic_long_inc_return(&client->writeback_count);
+	writeback_stat = atomic_long_inc_return(&fsc->writeback_count);
 	if (writeback_stat >
-	    CONGESTION_ON_THRESH(client->mount_args->congestion_kb))
-		set_bdi_congested(&client->backing_dev_info, BLK_RW_ASYNC);
+	    CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
+		set_bdi_congested(&fsc->backing_dev_info, BLK_RW_ASYNC);
 
 	set_page_writeback(page);
 	err = ceph_osdc_writepages(osdc, ceph_vino(inode),
@@ -496,7 +499,7 @@ static void writepages_finish(struct ceph_osd_request *req,
 	struct address_space *mapping = inode->i_mapping;
 	__s32 rc = -EIO;
 	u64 bytes = 0;
-	struct ceph_client *client = ceph_inode_to_client(inode);
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
 	long writeback_stat;
 	unsigned issued = ceph_caps_issued(ci);
 
@@ -529,10 +532,10 @@ static void writepages_finish(struct ceph_osd_request *req,
 		WARN_ON(!PageUptodate(page));
 
 		writeback_stat =
-			atomic_long_dec_return(&client->writeback_count);
+			atomic_long_dec_return(&fsc->writeback_count);
 		if (writeback_stat <
-		    CONGESTION_OFF_THRESH(client->mount_args->congestion_kb))
-			clear_bdi_congested(&client->backing_dev_info,
+		    CONGESTION_OFF_THRESH(fsc->mount_options->congestion_kb))
+			clear_bdi_congested(&fsc->backing_dev_info,
 					    BLK_RW_ASYNC);
 
 		ceph_put_snap_context((void *)page->private);
@@ -569,13 +572,13 @@ static void writepages_finish(struct ceph_osd_request *req,
  * mempool.  we avoid the mempool if we can because req->r_num_pages
  * may be less than the maximum write size.
  */
-static void alloc_page_vec(struct ceph_client *client,
+static void alloc_page_vec(struct ceph_fs_client *fsc,
 			   struct ceph_osd_request *req)
 {
 	req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages,
 			       GFP_NOFS);
 	if (!req->r_pages) {
-		req->r_pages = mempool_alloc(client->wb_pagevec_pool, GFP_NOFS);
+		req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS);
 		req->r_pages_from_pool = 1;
 		WARN_ON(!req->r_pages);
 	}
@@ -590,7 +593,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 	struct inode *inode = mapping->host;
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_client *client;
+	struct ceph_fs_client *fsc;
 	pgoff_t index, start, end;
 	int range_whole = 0;
 	int should_loop = 1;
@@ -617,13 +620,13 @@ static int ceph_writepages_start(struct address_space *mapping,
 	     wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
 	     (wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
 
-	client = ceph_inode_to_client(inode);
-	if (client->mount_state == CEPH_MOUNT_SHUTDOWN) {
+	fsc = ceph_inode_to_client(inode);
+	if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
 		pr_warning("writepage_start %p on forced umount\n", inode);
 		return -EIO; /* we're in a forced umount, don't write! */
 	}
-	if (client->mount_args->wsize && client->mount_args->wsize < wsize)
-		wsize = client->mount_args->wsize;
+	if (fsc->mount_options->wsize && fsc->mount_options->wsize < wsize)
+		wsize = fsc->mount_options->wsize;
 	if (wsize < PAGE_CACHE_SIZE)
 		wsize = PAGE_CACHE_SIZE;
 	max_pages_ever = wsize >> PAGE_CACHE_SHIFT;
@@ -769,7 +772,7 @@ get_more_pages:
 			offset = (unsigned long long)page->index
 				<< PAGE_CACHE_SHIFT;
 			len = wsize;
-			req = ceph_osdc_new_request(&client->osdc,
+			req = ceph_osdc_new_request(&fsc->client->osdc,
 						    &ci->i_layout,
 						    ceph_vino(inode),
 						    offset, &len,
@@ -782,7 +785,7 @@ get_more_pages:
 					    &inode->i_mtime, true, 1);
 			max_pages = req->r_num_pages;
 
-			alloc_page_vec(client, req);
+			alloc_page_vec(fsc, req);
 			req->r_callback = writepages_finish;
 			req->r_inode = inode;
 		}
@@ -794,10 +797,10 @@ get_more_pages:
 			     inode, page, page->index);
 
 			writeback_stat =
-			     atomic_long_inc_return(&client->writeback_count);
+			     atomic_long_inc_return(&fsc->writeback_count);
 			if (writeback_stat > CONGESTION_ON_THRESH(
-				    client->mount_args->congestion_kb)) {
-				set_bdi_congested(&client->backing_dev_info,
+				    fsc->mount_options->congestion_kb)) {
+				set_bdi_congested(&fsc->backing_dev_info,
 						  BLK_RW_ASYNC);
 			}
 
@@ -846,7 +849,7 @@ get_more_pages:
 			op->payload_len = cpu_to_le32(len);
 			req->r_request->hdr.data_len = cpu_to_le32(len);
 
-			ceph_osdc_start_request(&client->osdc, req, true);
+			ceph_osdc_start_request(&fsc->client->osdc, req, true);
 			req = NULL;
 
 			/* continue? */
@@ -915,7 +918,7 @@ static int ceph_update_writeable_page(struct file *file,
 {
 	struct inode *inode = file->f_dentry->d_inode;
 	struct ceph_inode_info *ci = ceph_inode(inode);
-	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 	loff_t page_off = pos & PAGE_CACHE_MASK;
 	int pos_in_page = pos & ~PAGE_CACHE_MASK;
 	int end_in_page = pos_in_page + len;
@@ -1053,8 +1056,8 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
 			  struct page *page, void *fsdata)
 {
 	struct inode *inode = file->f_dentry->d_inode;
-	struct ceph_client *client = ceph_inode_to_client(inode);
-	struct ceph_mds_client *mdsc = &client->mdsc;
+	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+	struct ceph_mds_client *mdsc = fsc->mdsc;
 	unsigned from = pos & (PAGE_CACHE_SIZE - 1);
 	int check_cap = 0;
 
@@ -1123,7 +1126,7 @@ static int ceph_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct inode *inode = vma->vm_file->f_dentry->d_inode;
 	struct page *page = vmf->page;
-	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 	loff_t off = page->index << PAGE_CACHE_SHIFT;
 	loff_t size, len;
 	int ret;
diff --git a/fs/ceph/armor.c b/fs/ceph/armor.c
deleted file mode 100644
index eb2a666b0be7..000000000000
--- a/fs/ceph/armor.c
+++ /dev/null
@@ -1,103 +0,0 @@
-
-#include <linux/errno.h>
-
-int ceph_armor(char *dst, const char *src, const char *end);
-int ceph_unarmor(char *dst, const char *src, const char *end);
-
-/*
- * base64 encode/decode.
- */
-
-static const char *pem_key =
-	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
-
-static int encode_bits(int c)
-{
-	return pem_key[c];
-}
-
-static int decode_bits(char c)
-{
-	if (c >= 'A' && c <= 'Z')
-		return c - 'A';
-	if (c >= 'a' && c <= 'z')
-		return c - 'a' + 26;
-	if (c >= '0' && c <= '9')
-		return c - '0' + 52;
-	if (c == '+')
-		return 62;
-	if (c == '/')
-		return 63;
-	if (c == '=')
-		return 0; /* just non-negative, please */
-	return -EINVAL;
-}
-
-int ceph_armor(char *dst, const char *src, const char *end)
-{
-	int olen = 0;
-	int line = 0;
-
-	while (src < end) {
-		unsigned char a, b, c;
-
-		a = *src++;
-		*dst++ = encode_bits(a >> 2);
-		if (src < end) {
-			b = *src++;
-			*dst++ = encode_bits(((a & 3) << 4) | (b >> 4));
-			if (src < end) {
-				c = *src++;
-				*dst++ = encode_bits(((b & 15) << 2) |
-						     (c >> 6));
-				*dst++ = encode_bits(c & 63);
-			} else {
-				*dst++ = encode_bits((b & 15) << 2);
-				*dst++ = '=';
-			}
-		} else {
-			*dst++ = encode_bits(((a & 3) << 4));
-			*dst++ = '=';
-			*dst++ = '=';
-		}
-		olen += 4;
-		line += 4;
-		if (line == 64) {
-			line = 0;
-			*(dst++) = '\n';
-			olen++;
-		}
-	}
-	return olen;
-}
-
-int ceph_unarmor(char *dst, const char *src, const char *end)
-{
-	int olen = 0;
-
-	while (src < end) {
-		int a, b, c, d;
-
-		if (src < end && src[0] == '\n')
-			src++;
-		if (src + 4 > end)
-			return -EINVAL;
-		a = decode_bits(src[0]);
-		b = decode_bits(src[1]);
-		c = decode_bits(src[2]);
-		d = decode_bits(src[3]);
-		if (a < 0 || b < 0 || c < 0 || d < 0)
-			return -EINVAL;
-
-		*dst++ = (a << 2) | (b >> 4);
-		if (src[2] == '=')
-			return olen + 1;
-		*dst++ = ((b & 15) << 4) | (c >> 2);
-		if (src[3] == '=')
-			return olen + 2;
-		*dst++ = ((c & 3) << 6) | d;
-		olen += 3;
-		src += 4;
-	}
-	return olen;
-}
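[Editor's note: ceph_armor()/ceph_unarmor() above are plain base64 with a
newline forced every 64 output characters; the file is deleted here as part
of the libceph split.  A minimal userspace round-trip sketch, assuming the
two functions are copied out verbatim with <linux/errno.h> swapped for
<errno.h>:

#include <stdio.h>
#include <string.h>

int ceph_armor(char *dst, const char *src, const char *end);
int ceph_unarmor(char *dst, const char *src, const char *end);

int main(void)
{
	const char msg[] = "The quick brown fox jumps over the lazy dog";
	char enc[256], dec[256];
	int elen, dlen;

	/* every 3 input bytes become 4 output chars, '='-padded at the end */
	elen = ceph_armor(enc, msg, msg + strlen(msg));
	dlen = ceph_unarmor(dec, enc, enc + elen);
	printf("%d bytes -> %d chars -> %d bytes\n",
	       (int)strlen(msg), elen, dlen);
	return (dlen == (int)strlen(msg) && !memcmp(dec, msg, dlen)) ? 0 : 1;
}
]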
diff --git a/fs/ceph/auth.c b/fs/ceph/auth.c
deleted file mode 100644
index 6d2e30600627..000000000000
--- a/fs/ceph/auth.c
+++ /dev/null
@@ -1,259 +0,0 @@
-#include "ceph_debug.h"
-
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include "types.h"
-#include "auth_none.h"
-#include "auth_x.h"
-#include "decode.h"
-#include "super.h"
-
-#include "messenger.h"
-
-/*
- * get protocol handler
- */
-static u32 supported_protocols[] = {
-	CEPH_AUTH_NONE,
-	CEPH_AUTH_CEPHX
-};
-
-static int ceph_auth_init_protocol(struct ceph_auth_client *ac, int protocol)
-{
-	switch (protocol) {
-	case CEPH_AUTH_NONE:
-		return ceph_auth_none_init(ac);
-	case CEPH_AUTH_CEPHX:
-		return ceph_x_init(ac);
-	default:
-		return -ENOENT;
-	}
-}
-
-/*
- * setup, teardown.
- */
-struct ceph_auth_client *ceph_auth_init(const char *name, const char *secret)
-{
-	struct ceph_auth_client *ac;
-	int ret;
-
-	dout("auth_init name '%s' secret '%s'\n", name, secret);
-
-	ret = -ENOMEM;
-	ac = kzalloc(sizeof(*ac), GFP_NOFS);
-	if (!ac)
-		goto out;
-
-	ac->negotiating = true;
-	if (name)
-		ac->name = name;
-	else
-		ac->name = CEPH_AUTH_NAME_DEFAULT;
-	dout("auth_init name %s secret %s\n", ac->name, secret);
-	ac->secret = secret;
-	return ac;
-
-out:
-	return ERR_PTR(ret);
-}
-
-void ceph_auth_destroy(struct ceph_auth_client *ac)
-{
-	dout("auth_destroy %p\n", ac);
-	if (ac->ops)
-		ac->ops->destroy(ac);
-	kfree(ac);
-}
-
-/*
- * Reset occurs when reconnecting to the monitor.
- */
-void ceph_auth_reset(struct ceph_auth_client *ac)
-{
-	dout("auth_reset %p\n", ac);
-	if (ac->ops && !ac->negotiating)
-		ac->ops->reset(ac);
-	ac->negotiating = true;
-}
-
-int ceph_entity_name_encode(const char *name, void **p, void *end)
-{
-	int len = strlen(name);
-
-	if (*p + 2*sizeof(u32) + len > end)
-		return -ERANGE;
-	ceph_encode_32(p, CEPH_ENTITY_TYPE_CLIENT);
-	ceph_encode_32(p, len);
-	ceph_encode_copy(p, name, len);
-	return 0;
-}
-
-/*
- * Initiate protocol negotiation with monitor.  Include entity name
- * and list supported protocols.
- */
-int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len)
-{
-	struct ceph_mon_request_header *monhdr = buf;
-	void *p = monhdr + 1, *end = buf + len, *lenp;
-	int i, num;
-	int ret;
-
-	dout("auth_build_hello\n");
-	monhdr->have_version = 0;
-	monhdr->session_mon = cpu_to_le16(-1);
-	monhdr->session_mon_tid = 0;
-
-	ceph_encode_32(&p, 0);  /* no protocol, yet */
-
-	lenp = p;
-	p += sizeof(u32);
-
-	ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
-	ceph_encode_8(&p, 1);
-	num = ARRAY_SIZE(supported_protocols);
-	ceph_encode_32(&p, num);
-	ceph_decode_need(&p, end, num * sizeof(u32), bad);
-	for (i = 0; i < num; i++)
-		ceph_encode_32(&p, supported_protocols[i]);
-
-	ret = ceph_entity_name_encode(ac->name, &p, end);
-	if (ret < 0)
-		return ret;
-	ceph_decode_need(&p, end, sizeof(u64), bad);
-	ceph_encode_64(&p, ac->global_id);
-
-	ceph_encode_32(&lenp, p - lenp - sizeof(u32));
-	return p - buf;
-
-bad:
-	return -ERANGE;
-}
-
-static int ceph_build_auth_request(struct ceph_auth_client *ac,
-				   void *msg_buf, size_t msg_len)
-{
-	struct ceph_mon_request_header *monhdr = msg_buf;
-	void *p = monhdr + 1;
-	void *end = msg_buf + msg_len;
-	int ret;
-
-	monhdr->have_version = 0;
-	monhdr->session_mon = cpu_to_le16(-1);
-	monhdr->session_mon_tid = 0;
-
-	ceph_encode_32(&p, ac->protocol);
-
-	ret = ac->ops->build_request(ac, p + sizeof(u32), end);
-	if (ret < 0) {
-		pr_err("error %d building auth method %s request\n", ret,
-		       ac->ops->name);
-		return ret;
-	}
-	dout(" built request %d bytes\n", ret);
-	ceph_encode_32(&p, ret);
-	return p + ret - msg_buf;
-}
-
-/*
- * Handle auth message from monitor.
- */
-int ceph_handle_auth_reply(struct ceph_auth_client *ac,
-			   void *buf, size_t len,
-			   void *reply_buf, size_t reply_len)
-{
-	void *p = buf;
-	void *end = buf + len;
-	int protocol;
-	s32 result;
-	u64 global_id;
-	void *payload, *payload_end;
-	int payload_len;
-	char *result_msg;
-	int result_msg_len;
-	int ret = -EINVAL;
-
-	dout("handle_auth_reply %p %p\n", p, end);
-	ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad);
-	protocol = ceph_decode_32(&p);
-	result = ceph_decode_32(&p);
-	global_id = ceph_decode_64(&p);
-	payload_len = ceph_decode_32(&p);
-	payload = p;
-	p += payload_len;
-	ceph_decode_need(&p, end, sizeof(u32), bad);
-	result_msg_len = ceph_decode_32(&p);
-	result_msg = p;
-	p += result_msg_len;
-	if (p != end)
-		goto bad;
-
-	dout(" result %d '%.*s' gid %llu len %d\n", result, result_msg_len,
-	     result_msg, global_id, payload_len);
-
-	payload_end = payload + payload_len;
-
-	if (global_id && ac->global_id != global_id) {
-		dout(" set global_id %lld -> %lld\n", ac->global_id, global_id);
-		ac->global_id = global_id;
-	}
-
-	if (ac->negotiating) {
-		/* server does not support our protocols? */
-		if (!protocol && result < 0) {
-			ret = result;
-			goto out;
-		}
-		/* set up (new) protocol handler? */
-		if (ac->protocol && ac->protocol != protocol) {
-			ac->ops->destroy(ac);
-			ac->protocol = 0;
-			ac->ops = NULL;
-		}
-		if (ac->protocol != protocol) {
-			ret = ceph_auth_init_protocol(ac, protocol);
-			if (ret) {
-				pr_err("error %d on auth protocol %d init\n",
-				       ret, protocol);
-				goto out;
-			}
-		}
-
-		ac->negotiating = false;
-	}
-
-	ret = ac->ops->handle_reply(ac, result, payload, payload_end);
-	if (ret == -EAGAIN) {
-		return ceph_build_auth_request(ac, reply_buf, reply_len);
-	} else if (ret) {
-		pr_err("auth method '%s' error %d\n", ac->ops->name, ret);
-		return ret;
-	}
-	return 0;
-
-bad:
-	pr_err("failed to decode auth msg\n");
-out:
-	return ret;
-}
-
-int ceph_build_auth(struct ceph_auth_client *ac,
-		    void *msg_buf, size_t msg_len)
-{
-	if (!ac->protocol)
-		return ceph_auth_build_hello(ac, msg_buf, msg_len);
-	BUG_ON(!ac->ops);
-	if (ac->ops->should_authenticate(ac))
-		return ceph_build_auth_request(ac, msg_buf, msg_len);
-	return 0;
-}
-
-int ceph_auth_is_authenticated(struct ceph_auth_client *ac)
-{
-	if (!ac->ops)
-		return 0;
-	return ac->ops->is_authenticated(ac);
-}
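[Editor's note: for readers tracing ceph_auth_build_hello() above, the payload
it writes after the ceph_mon_request_header can be sketched as the following
pseudo-struct.  This type does not exist in the tree; it is only an
illustration of the encode calls, with all integers little-endian:

struct hello_payload_sketch {
	__le32 protocol;	/* 0: none negotiated yet */
	__le32 len;		/* of everything below; back-filled via lenp */
	__u8   struct_v;	/* 1 */
	__le32 num_protocols;	/* ARRAY_SIZE(supported_protocols) == 2 */
	__le32 protocols[2];	/* CEPH_AUTH_NONE, CEPH_AUTH_CEPHX */
	__le32 entity_type;	/* CEPH_ENTITY_TYPE_CLIENT */
	__le32 name_len;
	char   name[];		/* name_len bytes, then a __le64 global_id */
};
]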
diff --git a/fs/ceph/auth.h b/fs/ceph/auth.h
deleted file mode 100644
index d38a2fb4a137..000000000000
--- a/fs/ceph/auth.h
+++ /dev/null
@@ -1,92 +0,0 @@
-#ifndef _FS_CEPH_AUTH_H
-#define _FS_CEPH_AUTH_H
-
-#include "types.h"
-#include "buffer.h"
-
-/*
- * Abstract interface for communicating with the authenticate module.
- * There is some handshake that takes place between us and the monitor
- * to acquire the necessary keys.  These are used to generate an
- * 'authorizer' that we use when connecting to a service (mds, osd).
- */
-
-struct ceph_auth_client;
-struct ceph_authorizer;
-
-struct ceph_auth_client_ops {
-	const char *name;
-
-	/*
-	 * true if we are authenticated and can connect to
-	 * services.
-	 */
-	int (*is_authenticated)(struct ceph_auth_client *ac);
-
-	/*
-	 * true if we should (re)authenticate, e.g., when our tickets
-	 * are getting old and crusty.
-	 */
-	int (*should_authenticate)(struct ceph_auth_client *ac);
-
-	/*
-	 * build requests and process replies during monitor
-	 * handshake.  if handle_reply returns -EAGAIN, we build
-	 * another request.
-	 */
-	int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end);
-	int (*handle_reply)(struct ceph_auth_client *ac, int result,
-			    void *buf, void *end);
-
-	/*
-	 * Create authorizer for connecting to a service, and verify
-	 * the response to authenticate the service.
-	 */
-	int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type,
-				 struct ceph_authorizer **a,
-				 void **buf, size_t *len,
-				 void **reply_buf, size_t *reply_len);
-	int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
-				       struct ceph_authorizer *a, size_t len);
-	void (*destroy_authorizer)(struct ceph_auth_client *ac,
-				   struct ceph_authorizer *a);
-	void (*invalidate_authorizer)(struct ceph_auth_client *ac,
-				      int peer_type);
-
-	/* reset when we (re)connect to a monitor */
-	void (*reset)(struct ceph_auth_client *ac);
-
-	void (*destroy)(struct ceph_auth_client *ac);
-};
-
-struct ceph_auth_client {
-	u32 protocol;           /* CEPH_AUTH_* */
-	void *private;          /* for use by protocol implementation */
-	const struct ceph_auth_client_ops *ops;  /* null iff protocol==0 */
-
-	bool negotiating;       /* true if negotiating protocol */
-	const char *name;       /* entity name */
-	u64 global_id;          /* our unique id in system */
-	const char *secret;     /* our secret key */
-	unsigned want_keys;     /* which services we want */
-};
-
-extern struct ceph_auth_client *ceph_auth_init(const char *name,
-					       const char *secret);
-extern void ceph_auth_destroy(struct ceph_auth_client *ac);
-
-extern void ceph_auth_reset(struct ceph_auth_client *ac);
-
-extern int ceph_auth_build_hello(struct ceph_auth_client *ac,
-				 void *buf, size_t len);
-extern int ceph_handle_auth_reply(struct ceph_auth_client *ac,
-				  void *buf, size_t len,
-				  void *reply_buf, size_t reply_len);
-extern int ceph_entity_name_encode(const char *name, void **p, void *end);
-
-extern int ceph_build_auth(struct ceph_auth_client *ac,
-			   void *msg_buf, size_t msg_len);
-
-extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
-
-#endif
diff --git a/fs/ceph/auth_none.c b/fs/ceph/auth_none.c
deleted file mode 100644
index ad1dc21286c7..000000000000
--- a/fs/ceph/auth_none.c
+++ /dev/null
@@ -1,131 +0,0 @@
-
-#include "ceph_debug.h"
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-
-#include "auth_none.h"
-#include "auth.h"
-#include "decode.h"
-
-static void reset(struct ceph_auth_client *ac)
-{
-	struct ceph_auth_none_info *xi = ac->private;
-
-	xi->starting = true;
-	xi->built_authorizer = false;
-}
-
-static void destroy(struct ceph_auth_client *ac)
-{
-	kfree(ac->private);
-	ac->private = NULL;
-}
-
-static int is_authenticated(struct ceph_auth_client *ac)
-{
-	struct ceph_auth_none_info *xi = ac->private;
-
-	return !xi->starting;
-}
-
-static int should_authenticate(struct ceph_auth_client *ac)
-{
-	struct ceph_auth_none_info *xi = ac->private;
-
-	return xi->starting;
-}
-
-/*
- * the generic auth code decode the global_id, and we carry no actual
- * authenticate state, so nothing happens here.
- */
-static int handle_reply(struct ceph_auth_client *ac, int result,
-			void *buf, void *end)
-{
-	struct ceph_auth_none_info *xi = ac->private;
-
-	xi->starting = false;
-	return result;
-}
-
-/*
- * build an 'authorizer' with our entity_name and global_id.  we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
- */
-static int ceph_auth_none_create_authorizer(
-	struct ceph_auth_client *ac, int peer_type,
-	struct ceph_authorizer **a,
-	void **buf, size_t *len,
-	void **reply_buf, size_t *reply_len)
-{
-	struct ceph_auth_none_info *ai = ac->private;
-	struct ceph_none_authorizer *au = &ai->au;
-	void *p, *end;
-	int ret;
-
-	if (!ai->built_authorizer) {
-		p = au->buf;
-		end = p + sizeof(au->buf);
-		ceph_encode_8(&p, 1);
-		ret = ceph_entity_name_encode(ac->name, &p, end - 8);
-		if (ret < 0)
-			goto bad;
-		ceph_decode_need(&p, end, sizeof(u64), bad2);
-		ceph_encode_64(&p, ac->global_id);
-		au->buf_len = p - (void *)au->buf;
-		ai->built_authorizer = true;
-		dout("built authorizer len %d\n", au->buf_len);
-	}
-
-	*a = (struct ceph_authorizer *)au;
-	*buf = au->buf;
-	*len = au->buf_len;
-	*reply_buf = au->reply_buf;
-	*reply_len = sizeof(au->reply_buf);
-	return 0;
-
-bad2:
-	ret = -ERANGE;
-bad:
-	return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
-					      struct ceph_authorizer *a)
-{
-	/* nothing to do */
-}
-
-static const struct ceph_auth_client_ops ceph_auth_none_ops = {
-	.name = "none",
-	.reset = reset,
-	.destroy = destroy,
-	.is_authenticated = is_authenticated,
-	.should_authenticate = should_authenticate,
-	.handle_reply = handle_reply,
-	.create_authorizer = ceph_auth_none_create_authorizer,
-	.destroy_authorizer = ceph_auth_none_destroy_authorizer,
-};
-
-int ceph_auth_none_init(struct ceph_auth_client *ac)
-{
-	struct ceph_auth_none_info *xi;
-
-	dout("ceph_auth_none_init %p\n", ac);
-	xi = kzalloc(sizeof(*xi), GFP_NOFS);
-	if (!xi)
-		return -ENOMEM;
-
-	xi->starting = true;
-	xi->built_authorizer = false;
-
-	ac->protocol = CEPH_AUTH_NONE;
-	ac->private = xi;
-	ac->ops = &ceph_auth_none_ops;
-	return 0;
-}
-
diff --git a/fs/ceph/auth_none.h b/fs/ceph/auth_none.h
deleted file mode 100644
index 8164df1a08be..000000000000
--- a/fs/ceph/auth_none.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#ifndef _FS_CEPH_AUTH_NONE_H
-#define _FS_CEPH_AUTH_NONE_H
-
-#include <linux/slab.h>
-
-#include "auth.h"
-
-/*
- * null security mode.
- *
- * we use a single static authorizer that simply encodes our entity name
- * and global id.
- */
-
-struct ceph_none_authorizer {
-	char buf[128];
-	int buf_len;
-	char reply_buf[0];
-};
-
-struct ceph_auth_none_info {
-	bool starting;
-	bool built_authorizer;
-	struct ceph_none_authorizer au;   /* we only need one; it's static */
-};
-
-extern int ceph_auth_none_init(struct ceph_auth_client *ac);
-
-#endif
-
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
deleted file mode 100644
index a2d002cbdec2..000000000000
--- a/fs/ceph/auth_x.c
+++ /dev/null
@@ -1,687 +0,0 @@
-
-#include "ceph_debug.h"
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/slab.h>
-
-#include "auth_x.h"
-#include "auth_x_protocol.h"
-#include "crypto.h"
-#include "auth.h"
-#include "decode.h"
-
-#define TEMP_TICKET_BUF_LEN	256
-
-static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed);
-
-static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
-{
-	struct ceph_x_info *xi = ac->private;
-	int need;
-
-	ceph_x_validate_tickets(ac, &need);
-	dout("ceph_x_is_authenticated want=%d need=%d have=%d\n",
-	     ac->want_keys, need, xi->have_keys);
-	return (ac->want_keys & xi->have_keys) == ac->want_keys;
-}
-
-static int ceph_x_should_authenticate(struct ceph_auth_client *ac)
-{
-	struct ceph_x_info *xi = ac->private;
-	int need;
-
-	ceph_x_validate_tickets(ac, &need);
-	dout("ceph_x_should_authenticate want=%d need=%d have=%d\n",
-	     ac->want_keys, need, xi->have_keys);
-	return need != 0;
-}
-
-static int ceph_x_encrypt_buflen(int ilen)
-{
-	return sizeof(struct ceph_x_encrypt_header) + ilen + 16 +
-		sizeof(u32);
-}
-
-static int ceph_x_encrypt(struct ceph_crypto_key *secret,
-			  void *ibuf, int ilen, void *obuf, size_t olen)
-{
-	struct ceph_x_encrypt_header head = {
-		.struct_v = 1,
-		.magic = cpu_to_le64(CEPHX_ENC_MAGIC)
-	};
-	size_t len = olen - sizeof(u32);
-	int ret;
-
-	ret = ceph_encrypt2(secret, obuf + sizeof(u32), &len,
-			    &head, sizeof(head), ibuf, ilen);
-	if (ret)
-		return ret;
-	ceph_encode_32(&obuf, len);
-	return len + sizeof(u32);
-}
-
-static int ceph_x_decrypt(struct ceph_crypto_key *secret,
-			  void **p, void *end, void *obuf, size_t olen)
-{
-	struct ceph_x_encrypt_header head;
-	size_t head_len = sizeof(head);
-	int len, ret;
-
-	len = ceph_decode_32(p);
-	if (*p + len > end)
-		return -EINVAL;
-
-	dout("ceph_x_decrypt len %d\n", len);
-	ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen,
-			    *p, len);
-	if (ret)
-		return ret;
-	if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC)
-		return -EPERM;
-	*p += len;
-	return olen;
-}
-
-/*
- * get existing (or insert new) ticket handler
- */
-static struct ceph_x_ticket_handler *
-get_ticket_handler(struct ceph_auth_client *ac, int service)
-{
-	struct ceph_x_ticket_handler *th;
-	struct ceph_x_info *xi = ac->private;
-	struct rb_node *parent = NULL, **p = &xi->ticket_handlers.rb_node;
-
-	while (*p) {
-		parent = *p;
-		th = rb_entry(parent, struct ceph_x_ticket_handler, node);
-		if (service < th->service)
-			p = &(*p)->rb_left;
-		else if (service > th->service)
-			p = &(*p)->rb_right;
-		else
-			return th;
-	}
-
-	/* add it */
-	th = kzalloc(sizeof(*th), GFP_NOFS);
-	if (!th)
-		return ERR_PTR(-ENOMEM);
-	th->service = service;
-	rb_link_node(&th->node, parent, p);
-	rb_insert_color(&th->node, &xi->ticket_handlers);
-	return th;
-}
-
-static void remove_ticket_handler(struct ceph_auth_client *ac,
-				  struct ceph_x_ticket_handler *th)
-{
-	struct ceph_x_info *xi = ac->private;
-
-	dout("remove_ticket_handler %p %d\n", th, th->service);
-	rb_erase(&th->node, &xi->ticket_handlers);
-	ceph_crypto_key_destroy(&th->session_key);
-	if (th->ticket_blob)
-		ceph_buffer_put(th->ticket_blob);
-	kfree(th);
-}
-
-static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
-				    struct ceph_crypto_key *secret,
-				    void *buf, void *end)
-{
-	struct ceph_x_info *xi = ac->private;
-	int num;
-	void *p = buf;
-	int ret;
-	char *dbuf;
-	char *ticket_buf;
-	u8 reply_struct_v;
-
-	dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-	if (!dbuf)
-		return -ENOMEM;
-
-	ret = -ENOMEM;
-	ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS);
-	if (!ticket_buf)
-		goto out_dbuf;
-
-	ceph_decode_need(&p, end, 1 + sizeof(u32), bad);
-	reply_struct_v = ceph_decode_8(&p);
-	if (reply_struct_v != 1)
-		goto bad;
-	num = ceph_decode_32(&p);
-	dout("%d tickets\n", num);
-	while (num--) {
-		int type;
-		u8 tkt_struct_v, blob_struct_v;
-		struct ceph_x_ticket_handler *th;
-		void *dp, *dend;
-		int dlen;
-		char is_enc;
-		struct timespec validity;
-		struct ceph_crypto_key old_key;
-		void *tp, *tpend;
-		struct ceph_timespec new_validity;
-		struct ceph_crypto_key new_session_key;
-		struct ceph_buffer *new_ticket_blob;
-		unsigned long new_expires, new_renew_after;
-		u64 new_secret_id;
-
-		ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
-
-		type = ceph_decode_32(&p);
-		dout(" ticket type %d %s\n", type, ceph_entity_type_name(type));
-
-		tkt_struct_v = ceph_decode_8(&p);
-		if (tkt_struct_v != 1)
-			goto bad;
-
-		th = get_ticket_handler(ac, type);
-		if (IS_ERR(th)) {
-			ret = PTR_ERR(th);
-			goto out;
-		}
-
-		/* blob for me */
-		dlen = ceph_x_decrypt(secret, &p, end, dbuf,
-				      TEMP_TICKET_BUF_LEN);
-		if (dlen <= 0) {
-			ret = dlen;
-			goto out;
-		}
-		dout(" decrypted %d bytes\n", dlen);
-		dend = dbuf + dlen;
-		dp = dbuf;
-
-		tkt_struct_v = ceph_decode_8(&dp);
-		if (tkt_struct_v != 1)
-			goto bad;
-
-		memcpy(&old_key, &th->session_key, sizeof(old_key));
-		ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
-		if (ret)
-			goto out;
-
-		ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
-		ceph_decode_timespec(&validity, &new_validity);
-		new_expires = get_seconds() + validity.tv_sec;
-		new_renew_after = new_expires - (validity.tv_sec / 4);
-		dout(" expires=%lu renew_after=%lu\n", new_expires,
-		     new_renew_after);
-
-		/* ticket blob for service */
-		ceph_decode_8_safe(&p, end, is_enc, bad);
-		tp = ticket_buf;
-		if (is_enc) {
-			/* encrypted */
-			dout(" encrypted ticket\n");
-			dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf,
-					      TEMP_TICKET_BUF_LEN);
-			if (dlen < 0) {
-				ret = dlen;
-				goto out;
-			}
-			dlen = ceph_decode_32(&tp);
-		} else {
-			/* unencrypted */
-			ceph_decode_32_safe(&p, end, dlen, bad);
-			ceph_decode_need(&p, end, dlen, bad);
-			ceph_decode_copy(&p, ticket_buf, dlen);
-		}
-		tpend = tp + dlen;
-		dout(" ticket blob is %d bytes\n", dlen);
-		ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
-		blob_struct_v = ceph_decode_8(&tp);
-		new_secret_id = ceph_decode_64(&tp);
-		ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
-		if (ret)
-			goto out;
-
-		/* all is well, update our ticket */
-		ceph_crypto_key_destroy(&th->session_key);
-		if (th->ticket_blob)
-			ceph_buffer_put(th->ticket_blob);
-		th->session_key = new_session_key;
-		th->ticket_blob = new_ticket_blob;
-		th->validity = new_validity;
-		th->secret_id = new_secret_id;
-		th->expires = new_expires;
-		th->renew_after = new_renew_after;
-		dout(" got ticket service %d (%s) secret_id %lld len %d\n",
-		     type, ceph_entity_type_name(type), th->secret_id,
-		     (int)th->ticket_blob->vec.iov_len);
-		xi->have_keys |= th->service;
-	}
-
-	ret = 0;
-out:
-	kfree(ticket_buf);
-out_dbuf:
-	kfree(dbuf);
-	return ret;
-
-bad:
-	ret = -EINVAL;
-	goto out;
-}
-
-static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
-				   struct ceph_x_ticket_handler *th,
-				   struct ceph_x_authorizer *au)
-{
-	int maxlen;
-	struct ceph_x_authorize_a *msg_a;
-	struct ceph_x_authorize_b msg_b;
-	void *p, *end;
-	int ret;
-	int ticket_blob_len =
-		(th->ticket_blob ? th->ticket_blob->vec.iov_len : 0);
-
-	dout("build_authorizer for %s %p\n",
-	     ceph_entity_type_name(th->service), au);
-
-	maxlen = sizeof(*msg_a) + sizeof(msg_b) +
-		ceph_x_encrypt_buflen(ticket_blob_len);
-	dout("  need len %d\n", maxlen);
-	if (au->buf && au->buf->alloc_len < maxlen) {
-		ceph_buffer_put(au->buf);
-		au->buf = NULL;
-	}
-	if (!au->buf) {
-		au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
-		if (!au->buf)
-			return -ENOMEM;
-	}
-	au->service = th->service;
-
-	msg_a = au->buf->vec.iov_base;
-	msg_a->struct_v = 1;
-	msg_a->global_id = cpu_to_le64(ac->global_id);
-	msg_a->service_id = cpu_to_le32(th->service);
-	msg_a->ticket_blob.struct_v = 1;
-	msg_a->ticket_blob.secret_id = cpu_to_le64(th->secret_id);
-	msg_a->ticket_blob.blob_len = cpu_to_le32(ticket_blob_len);
-	if (ticket_blob_len) {
-		memcpy(msg_a->ticket_blob.blob, th->ticket_blob->vec.iov_base,
-		       th->ticket_blob->vec.iov_len);
-	}
-	dout(" th %p secret_id %lld %lld\n", th, th->secret_id,
-	     le64_to_cpu(msg_a->ticket_blob.secret_id));
-
-	p = msg_a + 1;
-	p += ticket_blob_len;
-	end = au->buf->vec.iov_base + au->buf->vec.iov_len;
-
-	get_random_bytes(&au->nonce, sizeof(au->nonce));
-	msg_b.struct_v = 1;
-	msg_b.nonce = cpu_to_le64(au->nonce);
-	ret = ceph_x_encrypt(&th->session_key, &msg_b, sizeof(msg_b),
-			     p, end - p);
-	if (ret < 0)
-		goto out_buf;
-	p += ret;
-	au->buf->vec.iov_len = p - au->buf->vec.iov_base;
-	dout(" built authorizer nonce %llx len %d\n", au->nonce,
-	     (int)au->buf->vec.iov_len);
-	BUG_ON(au->buf->vec.iov_len > maxlen);
-	return 0;
-
-out_buf:
-	ceph_buffer_put(au->buf);
-	au->buf = NULL;
-	return ret;
-}
-
-static int ceph_x_encode_ticket(struct ceph_x_ticket_handler *th,
-				void **p, void *end)
-{
-	ceph_decode_need(p, end, 1 + sizeof(u64), bad);
-	ceph_encode_8(p, 1);
-	ceph_encode_64(p, th->secret_id);
-	if (th->ticket_blob) {
-		const char *buf = th->ticket_blob->vec.iov_base;
-		u32 len = th->ticket_blob->vec.iov_len;
-
-		ceph_encode_32_safe(p, end, len, bad);
-		ceph_encode_copy_safe(p, end, buf, len, bad);
-	} else {
-		ceph_encode_32_safe(p, end, 0, bad);
-	}
-
-	return 0;
-bad:
-	return -ERANGE;
-}
-
-static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed)
-{
-	int want = ac->want_keys;
-	struct ceph_x_info *xi = ac->private;
-	int service;
-
-	*pneed = ac->want_keys & ~(xi->have_keys);
-
-	for (service = 1; service <= want; service <<= 1) {
-		struct ceph_x_ticket_handler *th;
-
-		if (!(ac->want_keys & service))
-			continue;
-
-		if (*pneed & service)
-			continue;
-
-		th = get_ticket_handler(ac, service);
-
-		if (IS_ERR(th)) {
-			*pneed |= service;
-			continue;
-		}
-
-		if (get_seconds() >= th->renew_after)
-			*pneed |= service;
-		if (get_seconds() >= th->expires)
-			xi->have_keys &= ~service;
-	}
-}
-
-
-static int ceph_x_build_request(struct ceph_auth_client *ac,
-				void *buf, void *end)
-{
-	struct ceph_x_info *xi = ac->private;
-	int need;
-	struct ceph_x_request_header *head = buf;
-	int ret;
-	struct ceph_x_ticket_handler *th =
-		get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
-
-	if (IS_ERR(th))
-		return PTR_ERR(th);
-
-	ceph_x_validate_tickets(ac, &need);
-
-	dout("build_request want %x have %x need %x\n",
-	     ac->want_keys, xi->have_keys, need);
-
-	if (need & CEPH_ENTITY_TYPE_AUTH) {
-		struct ceph_x_authenticate *auth = (void *)(head + 1);
-		void *p = auth + 1;
-		struct ceph_x_challenge_blob tmp;
-		char tmp_enc[40];
-		u64 *u;
-
-		if (p > end)
-			return -ERANGE;
-
-		dout(" get_auth_session_key\n");
-		head->op = cpu_to_le16(CEPHX_GET_AUTH_SESSION_KEY);
-
-		/* encrypt and hash */
-		get_random_bytes(&auth->client_challenge, sizeof(u64));
-		tmp.client_challenge = auth->client_challenge;
-		tmp.server_challenge = cpu_to_le64(xi->server_challenge);
-		ret = ceph_x_encrypt(&xi->secret, &tmp, sizeof(tmp),
-				     tmp_enc, sizeof(tmp_enc));
-		if (ret < 0)
-			return ret;
-
-		auth->struct_v = 1;
-		auth->key = 0;
-		for (u = (u64 *)tmp_enc; u + 1 <= (u64 *)(tmp_enc + ret); u++)
-			auth->key ^= *(__le64 *)u;
-		dout(" server_challenge %llx client_challenge %llx key %llx\n",
-		     xi->server_challenge, le64_to_cpu(auth->client_challenge),
-		     le64_to_cpu(auth->key));
-
-		/* now encode the old ticket if exists */
-		ret = ceph_x_encode_ticket(th, &p, end);
-		if (ret < 0)
-			return ret;
-
-		return p - buf;
-	}
-
-	if (need) {
-		void *p = head + 1;
-		struct ceph_x_service_ticket_request *req;
-
-		if (p > end)
-			return -ERANGE;
-		head->op = cpu_to_le16(CEPHX_GET_PRINCIPAL_SESSION_KEY);
-
-		ret = ceph_x_build_authorizer(ac, th, &xi->auth_authorizer);
-		if (ret)
-			return ret;
-		ceph_encode_copy(&p, xi->auth_authorizer.buf->vec.iov_base,
-				 xi->auth_authorizer.buf->vec.iov_len);
-
-		req = p;
-		req->keys = cpu_to_le32(need);
-		p += sizeof(*req);
-		return p - buf;
-	}
-
-	return 0;
-}
-
-static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
-			       void *buf, void *end)
-{
-	struct ceph_x_info *xi = ac->private;
-	struct ceph_x_reply_header *head = buf;
-	struct ceph_x_ticket_handler *th;
-	int len = end - buf;
-	int op;
-	int ret;
-
-	if (result)
-		return result;  /* XXX hmm? */
-
-	if (xi->starting) {
-		/* it's a hello */
-		struct ceph_x_server_challenge *sc = buf;
-
-		if (len != sizeof(*sc))
-			return -EINVAL;
-		xi->server_challenge = le64_to_cpu(sc->server_challenge);
-		dout("handle_reply got server challenge %llx\n",
-		     xi->server_challenge);
-		xi->starting = false;
-		xi->have_keys &= ~CEPH_ENTITY_TYPE_AUTH;
-		return -EAGAIN;
-	}
-
-	op = le16_to_cpu(head->op);
-	result = le32_to_cpu(head->result);
-	dout("handle_reply op %d result %d\n", op, result);
-	switch (op) {
-	case CEPHX_GET_AUTH_SESSION_KEY:
-		/* verify auth key */
-		ret = ceph_x_proc_ticket_reply(ac, &xi->secret,
-					       buf + sizeof(*head), end);
-		break;
-
-	case CEPHX_GET_PRINCIPAL_SESSION_KEY:
-		th = get_ticket_handler(ac, CEPH_ENTITY_TYPE_AUTH);
-		if (IS_ERR(th))
-			return PTR_ERR(th);
-		ret = ceph_x_proc_ticket_reply(ac, &th->session_key,
-					       buf + sizeof(*head), end);
-		break;
-
-	default:
-		return -EINVAL;
-	}
-	if (ret)
-		return ret;
-	if (ac->want_keys == xi->have_keys)
-		return 0;
-	return -EAGAIN;
-}
-
-static int ceph_x_create_authorizer(
-	struct ceph_auth_client *ac, int peer_type,
-	struct ceph_authorizer **a,
-	void **buf, size_t *len,
-	void **reply_buf, size_t *reply_len)
-{
-	struct ceph_x_authorizer *au;
-	struct ceph_x_ticket_handler *th;
-	int ret;
-
-	th = get_ticket_handler(ac, peer_type);
-	if (IS_ERR(th))
-		return PTR_ERR(th);
-
-	au = kzalloc(sizeof(*au), GFP_NOFS);
-	if (!au)
-		return -ENOMEM;
-
-	ret = ceph_x_build_authorizer(ac, th, au);
-	if (ret) {
-		kfree(au);
-		return ret;
-	}
-
-	*a = (struct ceph_authorizer *)au;
-	*buf = au->buf->vec.iov_base;
-	*len = au->buf->vec.iov_len;
-	*reply_buf = au->reply_buf;
-	*reply_len = sizeof(au->reply_buf);
-	return 0;
-}
-
-static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
-					  struct ceph_authorizer *a, size_t len)
-{
-	struct ceph_x_authorizer *au = (void *)a;
-	struct ceph_x_ticket_handler *th;
-	int ret = 0;
-	struct ceph_x_authorize_reply reply;
-	void *p = au->reply_buf;
-	void *end = p + sizeof(au->reply_buf);
-
-	th = get_ticket_handler(ac, au->service);
-	if (IS_ERR(th))
-		return PTR_ERR(th);
-	ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply));
-	if (ret < 0)
-		return ret;
-	if (ret != sizeof(reply))
-		return -EPERM;
-
-	if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one))
-		ret = -EPERM;
-	else
-		ret = 0;
-	dout("verify_authorizer_reply nonce %llx got %llx ret %d\n",
-	     au->nonce, le64_to_cpu(reply.nonce_plus_one), ret);
-	return ret;
-}
-
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
-				      struct ceph_authorizer *a)
-{
-	struct ceph_x_authorizer *au = (void *)a;
-
-	ceph_buffer_put(au->buf);
-	kfree(au);
-}
-
-
-static void ceph_x_reset(struct ceph_auth_client *ac)
-{
-	struct ceph_x_info *xi = ac->private;
-
-	dout("reset\n");
-	xi->starting = true;
-	xi->server_challenge = 0;
-}
-
-static void ceph_x_destroy(struct ceph_auth_client *ac)
-{
-	struct ceph_x_info *xi = ac->private;
-	struct rb_node *p;
-
-	dout("ceph_x_destroy %p\n", ac);
-	ceph_crypto_key_destroy(&xi->secret);
-
-	while ((p = rb_first(&xi->ticket_handlers)) != NULL) {
-		struct ceph_x_ticket_handler *th =
-			rb_entry(p, struct ceph_x_ticket_handler, node);
-		remove_ticket_handler(ac, th);
-	}
-
-	if (xi->auth_authorizer.buf)
-		ceph_buffer_put(xi->auth_authorizer.buf);
-
-	kfree(ac->private);
-	ac->private = NULL;
-}
-
-static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac,
-					 int peer_type)
-{
-	struct ceph_x_ticket_handler *th;
-
-	th = get_ticket_handler(ac, peer_type);
-	if (!IS_ERR(th))
-		remove_ticket_handler(ac, th);
-}
-
-
-static const struct ceph_auth_client_ops ceph_x_ops = {
-	.name = "x",
-	.is_authenticated = ceph_x_is_authenticated,
-	.should_authenticate = ceph_x_should_authenticate,
-	.build_request = ceph_x_build_request,
-	.handle_reply = ceph_x_handle_reply,
-	.create_authorizer = ceph_x_create_authorizer,
-	.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
-	.destroy_authorizer = ceph_x_destroy_authorizer,
-	.invalidate_authorizer = ceph_x_invalidate_authorizer,
-	.reset = ceph_x_reset,
-	.destroy = ceph_x_destroy,
-};
-
-
-int ceph_x_init(struct ceph_auth_client *ac)
-{
-	struct ceph_x_info *xi;
-	int ret;
-
-	dout("ceph_x_init %p\n", ac);
-	ret = -ENOMEM;
-	xi = kzalloc(sizeof(*xi), GFP_NOFS);
-	if (!xi)
-		goto out;
-
-	ret = -EINVAL;
-	if (!ac->secret) {
-		pr_err("no secret set (for auth_x protocol)\n");
-		goto out_nomem;
-	}
-
-	ret = ceph_crypto_key_unarmor(&xi->secret, ac->secret);
-	if (ret)
-		goto out_nomem;
-
-	xi->starting = true;
-	xi->ticket_handlers = RB_ROOT;
-
-	ac->protocol = CEPH_AUTH_CEPHX;
-	ac->private = xi;
-	ac->ops = &ceph_x_ops;
-	return 0;
-
-out_nomem:
-	kfree(xi);
-out:
-	return ret;
-}
-
-
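[Editor's note: the "key" field built in ceph_x_build_request() above is just
the XOR of the 64-bit words of the encrypted challenge blob.  A standalone
sketch of that folding step (illustrative; buf/len stand in for tmp_enc and
the ceph_x_encrypt() return value):

#include <stdint.h>
#include <string.h>

/* XOR-fold a buffer into one 64-bit value, as the CEPHX_GET_AUTH_SESSION_KEY
 * path does over the encrypted ceph_x_challenge_blob.  A trailing partial
 * word is ignored, matching the kernel loop's "u + 1 <= end" bound. */
static uint64_t xor_fold64(const void *buf, size_t len)
{
	uint64_t key = 0, w;
	size_t i;

	for (i = 0; i + sizeof(w) <= len; i += sizeof(w)) {
		memcpy(&w, (const char *)buf + i, sizeof(w));
		key ^= w;
	}
	return key;
}
]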
diff --git a/fs/ceph/auth_x.h b/fs/ceph/auth_x.h
deleted file mode 100644
index ff6f8180e681..000000000000
--- a/fs/ceph/auth_x.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _FS_CEPH_AUTH_X_H
-#define _FS_CEPH_AUTH_X_H
-
-#include <linux/rbtree.h>
-
-#include "crypto.h"
-#include "auth.h"
-#include "auth_x_protocol.h"
-
-/*
- * Handle ticket for a single service.
- */
-struct ceph_x_ticket_handler {
-	struct rb_node node;
-	unsigned service;
-
-	struct ceph_crypto_key session_key;
-	struct ceph_timespec validity;
-
-	u64 secret_id;
-	struct ceph_buffer *ticket_blob;
-
-	unsigned long renew_after, expires;
-};
-
-
-struct ceph_x_authorizer {
-	struct ceph_buffer *buf;
-	unsigned service;
-	u64 nonce;
-	char reply_buf[128];  /* big enough for encrypted blob */
-};
-
-struct ceph_x_info {
-	struct ceph_crypto_key secret;
-
-	bool starting;
-	u64 server_challenge;
-
-	unsigned have_keys;
-	struct rb_root ticket_handlers;
-
-	struct ceph_x_authorizer auth_authorizer;
-};
-
-extern int ceph_x_init(struct ceph_auth_client *ac);
-
-#endif
-
diff --git a/fs/ceph/auth_x_protocol.h b/fs/ceph/auth_x_protocol.h
deleted file mode 100644
index 671d30576c4f..000000000000
--- a/fs/ceph/auth_x_protocol.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef __FS_CEPH_AUTH_X_PROTOCOL
-#define __FS_CEPH_AUTH_X_PROTOCOL
-
-#define CEPHX_GET_AUTH_SESSION_KEY      0x0100
-#define CEPHX_GET_PRINCIPAL_SESSION_KEY 0x0200
-#define CEPHX_GET_ROTATING_KEY          0x0400
-
-/* common bits */
-struct ceph_x_ticket_blob {
-	__u8 struct_v;
-	__le64 secret_id;
-	__le32 blob_len;
-	char blob[];
-} __attribute__ ((packed));
-
-
-/* common request/reply headers */
-struct ceph_x_request_header {
-	__le16 op;
-} __attribute__ ((packed));
-
-struct ceph_x_reply_header {
-	__le16 op;
-	__le32 result;
-} __attribute__ ((packed));
-
-
-/* authenticate handshake */
-
-/* initial hello (no reply header) */
-struct ceph_x_server_challenge {
-	__u8 struct_v;
-	__le64 server_challenge;
-} __attribute__ ((packed));
-
-struct ceph_x_authenticate {
-	__u8  struct_v;
-	__le64 client_challenge;
-	__le64 key;
-	/* ticket blob */
-} __attribute__ ((packed));
-
-struct ceph_x_service_ticket_request {
-	__u8 struct_v;
-	__le32 keys;
-} __attribute__ ((packed));
-
-struct ceph_x_challenge_blob {
-	__le64 server_challenge;
-	__le64 client_challenge;
-} __attribute__ ((packed));
-
-
-
-/* authorize handshake */
-
-/*
- * The authorizer consists of two pieces:
- *  a - service id, ticket blob
- *  b - encrypted with session key
- */
-struct ceph_x_authorize_a {
-	__u8 struct_v;
-	__le64 global_id;
-	__le32 service_id;
-	struct ceph_x_ticket_blob ticket_blob;
-} __attribute__ ((packed));
-
-struct ceph_x_authorize_b {
-	__u8 struct_v;
-	__le64 nonce;
-} __attribute__ ((packed));
-
-struct ceph_x_authorize_reply {
-	__u8 struct_v;
-	__le64 nonce_plus_one;
-} __attribute__ ((packed));
-
-
-/*
- * encyption bundle
- */
-#define CEPHX_ENC_MAGIC 0xff009cad8826aa55ull
-
-struct ceph_x_encrypt_header {
-	__u8 struct_v;
-	__le64 magic;
-} __attribute__ ((packed));
-
-#endif
diff --git a/fs/ceph/buffer.c b/fs/ceph/buffer.c
deleted file mode 100644
index cd39f17021de..000000000000
--- a/fs/ceph/buffer.c
+++ /dev/null
@@ -1,65 +0,0 @@
-
-#include "ceph_debug.h"
-
-#include <linux/slab.h>
-
-#include "buffer.h"
-#include "decode.h"
-
-struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
-{
-	struct ceph_buffer *b;
-
-	b = kmalloc(sizeof(*b), gfp);
-	if (!b)
-		return NULL;
-
-	b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
-	if (b->vec.iov_base) {
-		b->is_vmalloc = false;
-	} else {
-		b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
-		if (!b->vec.iov_base) {
-			kfree(b);
-			return NULL;
-		}
-		b->is_vmalloc = true;
-	}
-
-	kref_init(&b->kref);
-	b->alloc_len = len;
-	b->vec.iov_len = len;
-	dout("buffer_new %p\n", b);
-	return b;
-}
-
-void ceph_buffer_release(struct kref *kref)
-{
-	struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);
-
-	dout("buffer_release %p\n", b);
-	if (b->vec.iov_base) {
-		if (b->is_vmalloc)
-			vfree(b->vec.iov_base);
-		else
-			kfree(b->vec.iov_base);
-	}
-	kfree(b);
-}
-
-int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
-{
-	size_t len;
-
-	ceph_decode_need(p, end, sizeof(u32), bad);
-	len = ceph_decode_32(p);
-	dout("decode_buffer len %d\n", (int)len);
-	ceph_decode_need(p, end, len, bad);
-	*b = ceph_buffer_new(len, GFP_NOFS);
-	if (!*b)
-		return -ENOMEM;
-	ceph_decode_copy(p, (*b)->vec.iov_base, len);
-	return 0;
-bad:
-	return -EINVAL;
-}
diff --git a/fs/ceph/buffer.h b/fs/ceph/buffer.h
deleted file mode 100644
index 58d19014068f..000000000000
--- a/fs/ceph/buffer.h
+++ /dev/null
@@ -1,39 +0,0 @@
-#ifndef __FS_CEPH_BUFFER_H
-#define __FS_CEPH_BUFFER_H
-
-#include <linux/kref.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/types.h>
-#include <linux/uio.h>
-
-/*
- * a simple reference counted buffer.
- *
- * use kmalloc for small sizes (<= one page), vmalloc for larger
- * sizes.
- */
-struct ceph_buffer {
-	struct kref kref;
-	struct kvec vec;
-	size_t alloc_len;
-	bool is_vmalloc;
-};
-
-extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp);
-extern void ceph_buffer_release(struct kref *kref);
-
-static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b)
-{
-	kref_get(&b->kref);
-	return b;
-}
-
-static inline void ceph_buffer_put(struct ceph_buffer *b)
-{
-	kref_put(&b->kref, ceph_buffer_release);
-}
-
-extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end);
-
-#endif
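[Editor's note: ceph_buffer_get()/ceph_buffer_put() above are the standard
kref idiom: the final put invokes the release callback.  A self-contained
userspace analogue of the same pattern, with invented names and C11 atomics
standing in for struct kref:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct refbuf {
	atomic_int refs;	/* plays the role of struct kref */
	void *data;
};

static struct refbuf *refbuf_new(size_t len)
{
	struct refbuf *b = malloc(sizeof(*b));

	if (!b || !(b->data = malloc(len))) {
		free(b);
		return NULL;
	}
	atomic_init(&b->refs, 1);	/* like kref_init() */
	return b;
}

static struct refbuf *refbuf_get(struct refbuf *b)
{
	atomic_fetch_add(&b->refs, 1);	/* kref_get() */
	return b;
}

static void refbuf_put(struct refbuf *b)
{
	/* kref_put(): the last reference runs the release */
	if (atomic_fetch_sub(&b->refs, 1) == 1) {
		free(b->data);
		free(b);
	}
}

int main(void)
{
	struct refbuf *b = refbuf_new(64);

	refbuf_get(b);	/* a second owner */
	refbuf_put(b);	/* count 2 -> 1: still alive */
	refbuf_put(b);	/* count 1 -> 0: freed here */
	puts("released on last put");
	return 0;
}
]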
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 5e9da996a151..3cff67cbb9c0 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1,4 +1,4 @@
-#include "ceph_debug.h"
+#include <linux/ceph/ceph_debug.h>
 
 #include <linux/fs.h>
 #include <linux/kernel.h>
@@ -9,8 +9,9 @@
 #include <linux/writeback.h>
 
 #include "super.h"
-#include "decode.h"
-#include "messenger.h"
+#include "mds_client.h"
+#include <linux/ceph/decode.h>
+#include <linux/ceph/messenger.h>
 
 /*
  * Capability management
@@ -287,11 +288,11 @@ void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
 	spin_unlock(&mdsc->caps_list_lock);
 }
 
-void ceph_reservation_status(struct ceph_client *client,
+void ceph_reservation_status(struct ceph_fs_client *fsc,
 			     int *total, int *avail, int *used, int *reserved,
 			     int *min)
 {
-	struct ceph_mds_client *mdsc = &client->mdsc;
+	struct ceph_mds_client *mdsc = fsc->mdsc;
 
 	if (total)
 		*total = mdsc->caps_total_count;
@@ -399,7 +400,7 @@ static void __insert_cap_node(struct ceph_inode_info *ci,
 static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
 			       struct ceph_inode_info *ci)
 {
-	struct ceph_mount_args *ma = mdsc->client->mount_args;
+	struct ceph_mount_options *ma = mdsc->fsc->mount_options;
 
 	ci->i_hold_caps_min = round_jiffies(jiffies +
 					    ma->caps_wanted_delay_min * HZ);
@@ -515,7 +516,7 @@ int ceph_add_cap(struct inode *inode,
 		 unsigned seq, unsigned mseq, u64 realmino, int flags,
 		 struct ceph_cap_reservation *caps_reservation)
 {
-	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_cap *new_cap = NULL;
 	struct ceph_cap *cap;
@@ -873,7 +874,7 @@ void __ceph_remove_cap(struct ceph_cap *cap)
 	struct ceph_mds_session *session = cap->session;
 	struct ceph_inode_info *ci = cap->ci;
 	struct ceph_mds_client *mdsc =
-		&ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
+		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
 	int removed = 0;
 
 	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
@@ -1210,7 +1211,7 @@ void __ceph_flush_snaps(struct ceph_inode_info *ci,
 	int mds;
 	struct ceph_cap_snap *capsnap;
 	u32 mseq;
-	struct ceph_mds_client *mdsc = &ceph_inode_to_client(inode)->mdsc;
+	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 	struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
 						    session->s_mutex */
 	u64 next_follows = 0;  /* keep track of how far we've gotten through the
@@ -1336,7 +1337,7 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
1336void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask) 1337void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1337{ 1338{
1338 struct ceph_mds_client *mdsc = 1339 struct ceph_mds_client *mdsc =
1339 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 1340 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
1340 struct inode *inode = &ci->vfs_inode; 1341 struct inode *inode = &ci->vfs_inode;
1341 int was = ci->i_dirty_caps; 1342 int was = ci->i_dirty_caps;
1342 int dirty = 0; 1343 int dirty = 0;
@@ -1378,7 +1379,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
1378static int __mark_caps_flushing(struct inode *inode, 1379static int __mark_caps_flushing(struct inode *inode,
1379 struct ceph_mds_session *session) 1380 struct ceph_mds_session *session)
1380{ 1381{
1381 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 1382 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1382 struct ceph_inode_info *ci = ceph_inode(inode); 1383 struct ceph_inode_info *ci = ceph_inode(inode);
1383 int flushing; 1384 int flushing;
1384 1385
@@ -1462,8 +1463,8 @@ static int try_nonblocking_invalidate(struct inode *inode)
1462void ceph_check_caps(struct ceph_inode_info *ci, int flags, 1463void ceph_check_caps(struct ceph_inode_info *ci, int flags,
1463 struct ceph_mds_session *session) 1464 struct ceph_mds_session *session)
1464{ 1465{
1465 struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode); 1466 struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
1466 struct ceph_mds_client *mdsc = &client->mdsc; 1467 struct ceph_mds_client *mdsc = fsc->mdsc;
1467 struct inode *inode = &ci->vfs_inode; 1468 struct inode *inode = &ci->vfs_inode;
1468 struct ceph_cap *cap; 1469 struct ceph_cap *cap;
1469 int file_wanted, used; 1470 int file_wanted, used;
@@ -1706,7 +1707,7 @@ ack:
1706static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session, 1707static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
1707 unsigned *flush_tid) 1708 unsigned *flush_tid)
1708{ 1709{
1709 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 1710 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1710 struct ceph_inode_info *ci = ceph_inode(inode); 1711 struct ceph_inode_info *ci = ceph_inode(inode);
1711 int unlock_session = session ? 0 : 1; 1712 int unlock_session = session ? 0 : 1;
1712 int flushing = 0; 1713 int flushing = 0;
@@ -1872,7 +1873,7 @@ int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
1872 caps_are_flushed(inode, flush_tid)); 1873 caps_are_flushed(inode, flush_tid));
1873 } else { 1874 } else {
1874 struct ceph_mds_client *mdsc = 1875 struct ceph_mds_client *mdsc =
1875 &ceph_sb_to_client(inode->i_sb)->mdsc; 1876 ceph_sb_to_client(inode->i_sb)->mdsc;
1876 1877
1877 spin_lock(&inode->i_lock); 1878 spin_lock(&inode->i_lock);
1878 if (__ceph_caps_dirty(ci)) 1879 if (__ceph_caps_dirty(ci))
@@ -2465,7 +2466,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2465 __releases(inode->i_lock) 2466 __releases(inode->i_lock)
2466{ 2467{
2467 struct ceph_inode_info *ci = ceph_inode(inode); 2468 struct ceph_inode_info *ci = ceph_inode(inode);
2468 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 2469 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2469 unsigned seq = le32_to_cpu(m->seq); 2470 unsigned seq = le32_to_cpu(m->seq);
2470 int dirty = le32_to_cpu(m->dirty); 2471 int dirty = le32_to_cpu(m->dirty);
2471 int cleaned = 0; 2472 int cleaned = 0;
@@ -2713,7 +2714,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
2713 struct ceph_msg *msg) 2714 struct ceph_msg *msg)
2714{ 2715{
2715 struct ceph_mds_client *mdsc = session->s_mdsc; 2716 struct ceph_mds_client *mdsc = session->s_mdsc;
2716 struct super_block *sb = mdsc->client->sb; 2717 struct super_block *sb = mdsc->fsc->sb;
2717 struct inode *inode; 2718 struct inode *inode;
2718 struct ceph_cap *cap; 2719 struct ceph_cap *cap;
2719 struct ceph_mds_caps *h; 2720 struct ceph_mds_caps *h;
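
The caps.c hunks above are mechanical: struct ceph_client becomes struct ceph_fs_client, and every &client->mdsc becomes fsc->mdsc, i.e. the MDS client is no longer embedded but referenced through a pointer. Roughly (an assumed sketch; the real definitions live in super.h elsewhere in this patch):

/* before: mds client embedded in the client struct */
struct ceph_client {
	struct ceph_mds_client mdsc;	/* hence &client->mdsc */
	/* ... */
};

/* after: renamed, with the mds client hanging off a pointer */
struct ceph_fs_client {
	struct ceph_mds_client *mdsc;	/* hence fsc->mdsc */
	/* ... */
};
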
diff --git a/fs/ceph/ceph_debug.h b/fs/ceph/ceph_debug.h
deleted file mode 100644
index 1818c2305610..000000000000
--- a/fs/ceph/ceph_debug.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef _FS_CEPH_DEBUG_H
2#define _FS_CEPH_DEBUG_H
3
4#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5
6#ifdef CONFIG_CEPH_FS_PRETTYDEBUG
7
8/*
9 * wrap pr_debug to include a filename:lineno prefix on each line.
10 * this incurs some overhead (kernel size and execution time) due to
11 * the extra function call at each call site.
12 */
13
14# if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
15extern const char *ceph_file_part(const char *s, int len);
16# define dout(fmt, ...) \
17 pr_debug(" %12.12s:%-4d : " fmt, \
18 ceph_file_part(__FILE__, sizeof(__FILE__)), \
19 __LINE__, ##__VA_ARGS__)
20# else
21/* faux printk call just to see any compiler warnings. */
22# define dout(fmt, ...) do { \
23 if (0) \
24 printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
25 } while (0)
26# endif
27
28#else
29
30/*
31 * or, just wrap pr_debug
32 */
33# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
34
35#endif
36
37#endif
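
With CONFIG_CEPH_FS_PRETTYDEBUG set, dout() prefixes each message with a 12-character filename field and the line number, per the " %12.12s:%-4d : " format above. An illustrative expansion (the values are made up):

	dout("got cap %p\n", cap);
	/* PRETTYDEBUG: pr_debug("       caps.c:1234 : got cap %p\n", cap); */
	/* otherwise:   pr_debug(" got cap %p\n", cap); */
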
diff --git a/fs/ceph/ceph_frag.c b/fs/ceph/ceph_frag.c
index ab6cf35c4091..bdce8b1fbd06 100644
--- a/fs/ceph/ceph_frag.c
+++ b/fs/ceph/ceph_frag.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Ceph 'frag' type 2 * Ceph 'frag' type
3 */ 3 */
4#include "types.h" 4#include <linux/module.h>
5#include <linux/ceph/types.h>
5 6
6int ceph_frag_compare(__u32 a, __u32 b) 7int ceph_frag_compare(__u32 a, __u32 b)
7{ 8{
diff --git a/fs/ceph/ceph_frag.h b/fs/ceph/ceph_frag.h
deleted file mode 100644
index 5babb8e95352..000000000000
--- a/fs/ceph/ceph_frag.h
+++ /dev/null
@@ -1,109 +0,0 @@
1#ifndef FS_CEPH_FRAG_H
2#define FS_CEPH_FRAG_H
3
4/*
5 * "Frags" are a way to describe a subset of a 32-bit number space,
6 * using a mask and a value to match against that mask. Any given frag
7 * (subset of the number space) can be partitioned into 2^n sub-frags.
8 *
9 * Frags are encoded into a 32-bit word:
10 * 8 upper bits = "bits"
11 * 24 lower bits = "value"
12 * (We could go to 5+27 bits, but who cares.)
13 *
14 * We use the _most_ significant bits of the 24 bit value. This makes
15 * values logically sort.
16 *
17 * Unfortunately, because the "bits" field is still in the high bits, we
18 * can't sort encoded frags numerically. However, it does allow you
19 * to feed encoded frags as values into frag_contains_value.
20 */
21static inline __u32 ceph_frag_make(__u32 b, __u32 v)
22{
23 return (b << 24) |
24 (v & (0xffffffu << (24-b)) & 0xffffffu);
25}
26static inline __u32 ceph_frag_bits(__u32 f)
27{
28 return f >> 24;
29}
30static inline __u32 ceph_frag_value(__u32 f)
31{
32 return f & 0xffffffu;
33}
34static inline __u32 ceph_frag_mask(__u32 f)
35{
36 return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu;
37}
38static inline __u32 ceph_frag_mask_shift(__u32 f)
39{
40 return 24 - ceph_frag_bits(f);
41}
42
43static inline int ceph_frag_contains_value(__u32 f, __u32 v)
44{
45 return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
46}
47static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
48{
49 /* is sub as specific as us, and contained by us? */
50 return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
51 (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
52}
53
54static inline __u32 ceph_frag_parent(__u32 f)
55{
56 return ceph_frag_make(ceph_frag_bits(f) - 1,
57 ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
58}
59static inline int ceph_frag_is_left_child(__u32 f)
60{
61 return ceph_frag_bits(f) > 0 &&
62 (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
63}
64static inline int ceph_frag_is_right_child(__u32 f)
65{
66 return ceph_frag_bits(f) > 0 &&
67 (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1;
68}
69static inline __u32 ceph_frag_sibling(__u32 f)
70{
71 return ceph_frag_make(ceph_frag_bits(f),
72 ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
73}
74static inline __u32 ceph_frag_left_child(__u32 f)
75{
76 return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
77}
78static inline __u32 ceph_frag_right_child(__u32 f)
79{
80 return ceph_frag_make(ceph_frag_bits(f)+1,
81 ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
82}
83static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
84{
85 int newbits = ceph_frag_bits(f) + by;
86 return ceph_frag_make(newbits,
87 ceph_frag_value(f) | (i << (24 - newbits)));
88}
89static inline int ceph_frag_is_leftmost(__u32 f)
90{
91 return ceph_frag_value(f) == 0;
92}
93static inline int ceph_frag_is_rightmost(__u32 f)
94{
95 return ceph_frag_value(f) == ceph_frag_mask(f);
96}
97static inline __u32 ceph_frag_next(__u32 f)
98{
99 return ceph_frag_make(ceph_frag_bits(f),
100 ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f)));
101}
102
103/*
104 * comparator to sort frags logically, as when traversing the
105 * number space in ascending order...
106 */
107int ceph_frag_compare(__u32 a, __u32 b);
108
109#endif
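
A worked example of the encoding above, as a hypothetical userspace check (assumes the inline helpers from this header and a __u32 typedef):

#include <assert.h>

int main(void)
{
	__u32 f = ceph_frag_make(1, 0x800000);	/* top half of the space */

	assert(f == 0x01800000);			/* bits=1, value=0x800000 */
	assert(ceph_frag_contains_value(f, 0xabcdef));	/* bit 23 set */
	assert(!ceph_frag_contains_value(f, 0x2bcdef));	/* bit 23 clear */
	assert(ceph_frag_left_child(f) == 0x02800000);	/* 2 bits, value 0x800000 */
	assert(ceph_frag_right_child(f) == 0x02c00000);	/* 2 bits, value 0xc00000 */
	return 0;
}
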
diff --git a/fs/ceph/ceph_fs.c b/fs/ceph/ceph_fs.c
deleted file mode 100644
index 3ac6cc7c1156..000000000000
--- a/fs/ceph/ceph_fs.c
+++ /dev/null
@@ -1,72 +0,0 @@
1/*
2 * Some non-inline ceph helpers
3 */
4#include "types.h"
5
6/*
7 * return true if @layout appears to be valid
8 */
9int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
10{
11 __u32 su = le32_to_cpu(layout->fl_stripe_unit);
12 __u32 sc = le32_to_cpu(layout->fl_stripe_count);
13 __u32 os = le32_to_cpu(layout->fl_object_size);
14
15 /* stripe unit, object size must be non-zero, 64k increment */
16 if (!su || (su & (CEPH_MIN_STRIPE_UNIT-1)))
17 return 0;
18 if (!os || (os & (CEPH_MIN_STRIPE_UNIT-1)))
19 return 0;
20 /* object size must be a multiple of stripe unit */
21 if (os < su || os % su)
22 return 0;
23 /* stripe count must be non-zero */
24 if (!sc)
25 return 0;
26 return 1;
27}
28
29
30int ceph_flags_to_mode(int flags)
31{
32 int mode;
33
34#ifdef O_DIRECTORY /* fixme */
35 if ((flags & O_DIRECTORY) == O_DIRECTORY)
36 return CEPH_FILE_MODE_PIN;
37#endif
38 if ((flags & O_APPEND) == O_APPEND)
39 flags |= O_WRONLY;
40
41 if ((flags & O_ACCMODE) == O_RDWR)
42 mode = CEPH_FILE_MODE_RDWR;
43 else if ((flags & O_ACCMODE) == O_WRONLY)
44 mode = CEPH_FILE_MODE_WR;
45 else
46 mode = CEPH_FILE_MODE_RD;
47
48#ifdef O_LAZY
49 if (flags & O_LAZY)
50 mode |= CEPH_FILE_MODE_LAZY;
51#endif
52
53 return mode;
54}
55
56int ceph_caps_for_mode(int mode)
57{
58 int caps = CEPH_CAP_PIN;
59
60 if (mode & CEPH_FILE_MODE_RD)
61 caps |= CEPH_CAP_FILE_SHARED |
62 CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
63 if (mode & CEPH_FILE_MODE_WR)
64 caps |= CEPH_CAP_FILE_EXCL |
65 CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
66 CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
67 CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
68 if (mode & CEPH_FILE_MODE_LAZY)
69 caps |= CEPH_CAP_FILE_LAZYIO;
70
71 return caps;
72}
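
A few concrete mappings through the helpers above (the platform-dependent O_DIRECTORY/O_LAZY branches aside):

	ceph_flags_to_mode(O_RDONLY)	-> CEPH_FILE_MODE_RD
	ceph_flags_to_mode(O_RDWR)	-> CEPH_FILE_MODE_RDWR
	ceph_flags_to_mode(O_APPEND)	-> CEPH_FILE_MODE_WR	/* append implies write */

	ceph_caps_for_mode(CEPH_FILE_MODE_RD)
		-> CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED |
		   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE
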
diff --git a/fs/ceph/ceph_fs.h b/fs/ceph/ceph_fs.h
deleted file mode 100644
index d5619ac86711..000000000000
--- a/fs/ceph/ceph_fs.h
+++ /dev/null
@@ -1,728 +0,0 @@
1/*
2 * ceph_fs.h - Ceph constants and data types to share between kernel and
3 * user space.
4 *
5 * Most types in this file are defined as little-endian, and are
6 * primarily intended to describe data structures that pass over the
7 * wire or that are stored on disk.
8 *
9 * LGPL2
10 */
11
12#ifndef CEPH_FS_H
13#define CEPH_FS_H
14
15#include "msgr.h"
16#include "rados.h"
17
18/*
 19 * subprotocol versions. when specific message types or high-level
 20 * protocols change, bump the affected components. we rev
21 * internal cluster protocols separately from the public,
22 * client-facing protocol.
23 */
24#define CEPH_OSD_PROTOCOL 8 /* cluster internal */
25#define CEPH_MDS_PROTOCOL 12 /* cluster internal */
26#define CEPH_MON_PROTOCOL 5 /* cluster internal */
27#define CEPH_OSDC_PROTOCOL 24 /* server/client */
28#define CEPH_MDSC_PROTOCOL 32 /* server/client */
29#define CEPH_MONC_PROTOCOL 15 /* server/client */
30
31
32#define CEPH_INO_ROOT 1
33#define CEPH_INO_CEPH 2 /* hidden .ceph dir */
34
35/* arbitrary limit on max # of monitors (cluster of 3 is typical) */
36#define CEPH_MAX_MON 31
37
38
39/*
40 * feature bits
41 */
42#define CEPH_FEATURE_UID (1<<0)
43#define CEPH_FEATURE_NOSRCADDR (1<<1)
44#define CEPH_FEATURE_MONCLOCKCHECK (1<<2)
45#define CEPH_FEATURE_FLOCK (1<<3)
46
47
48/*
49 * ceph_file_layout - describe data layout for a file/inode
50 */
51struct ceph_file_layout {
52 /* file -> object mapping */
53 __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple
54 of page size. */
55 __le32 fl_stripe_count; /* over this many objects */
56 __le32 fl_object_size; /* until objects are this big, then move to
57 new objects */
58 __le32 fl_cas_hash; /* 0 = none; 1 = sha256 */
59
60 /* pg -> disk layout */
61 __le32 fl_object_stripe_unit; /* for per-object parity, if any */
62
63 /* object -> pg layout */
64 __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */
65 __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */
66} __attribute__ ((packed));
67
68#define CEPH_MIN_STRIPE_UNIT 65536
69
70int ceph_file_layout_is_valid(const struct ceph_file_layout *layout);
71
72
73/* crypto algorithms */
74#define CEPH_CRYPTO_NONE 0x0
75#define CEPH_CRYPTO_AES 0x1
76
77#define CEPH_AES_IV "cephsageyudagreg"
78
79/* security/authentication protocols */
80#define CEPH_AUTH_UNKNOWN 0x0
81#define CEPH_AUTH_NONE 0x1
82#define CEPH_AUTH_CEPHX 0x2
83
84#define CEPH_AUTH_UID_DEFAULT ((__u64) -1)
85
86
87/*********************************************
88 * message layer
89 */
90
91/*
92 * message types
93 */
94
95/* misc */
96#define CEPH_MSG_SHUTDOWN 1
97#define CEPH_MSG_PING 2
98
99/* client <-> monitor */
100#define CEPH_MSG_MON_MAP 4
101#define CEPH_MSG_MON_GET_MAP 5
102#define CEPH_MSG_STATFS 13
103#define CEPH_MSG_STATFS_REPLY 14
104#define CEPH_MSG_MON_SUBSCRIBE 15
105#define CEPH_MSG_MON_SUBSCRIBE_ACK 16
106#define CEPH_MSG_AUTH 17
107#define CEPH_MSG_AUTH_REPLY 18
108
109/* client <-> mds */
110#define CEPH_MSG_MDS_MAP 21
111
112#define CEPH_MSG_CLIENT_SESSION 22
113#define CEPH_MSG_CLIENT_RECONNECT 23
114
115#define CEPH_MSG_CLIENT_REQUEST 24
116#define CEPH_MSG_CLIENT_REQUEST_FORWARD 25
117#define CEPH_MSG_CLIENT_REPLY 26
118#define CEPH_MSG_CLIENT_CAPS 0x310
119#define CEPH_MSG_CLIENT_LEASE 0x311
120#define CEPH_MSG_CLIENT_SNAP 0x312
121#define CEPH_MSG_CLIENT_CAPRELEASE 0x313
122
123/* pool ops */
124#define CEPH_MSG_POOLOP_REPLY 48
125#define CEPH_MSG_POOLOP 49
126
127
128/* osd */
129#define CEPH_MSG_OSD_MAP 41
130#define CEPH_MSG_OSD_OP 42
131#define CEPH_MSG_OSD_OPREPLY 43
132
133/* pool operations */
134enum {
135 POOL_OP_CREATE = 0x01,
136 POOL_OP_DELETE = 0x02,
137 POOL_OP_AUID_CHANGE = 0x03,
138 POOL_OP_CREATE_SNAP = 0x11,
139 POOL_OP_DELETE_SNAP = 0x12,
140 POOL_OP_CREATE_UNMANAGED_SNAP = 0x21,
141 POOL_OP_DELETE_UNMANAGED_SNAP = 0x22,
142};
143
144struct ceph_mon_request_header {
145 __le64 have_version;
146 __le16 session_mon;
147 __le64 session_mon_tid;
148} __attribute__ ((packed));
149
150struct ceph_mon_statfs {
151 struct ceph_mon_request_header monhdr;
152 struct ceph_fsid fsid;
153} __attribute__ ((packed));
154
155struct ceph_statfs {
156 __le64 kb, kb_used, kb_avail;
157 __le64 num_objects;
158} __attribute__ ((packed));
159
160struct ceph_mon_statfs_reply {
161 struct ceph_fsid fsid;
162 __le64 version;
163 struct ceph_statfs st;
164} __attribute__ ((packed));
165
166const char *ceph_pool_op_name(int op);
167
168struct ceph_mon_poolop {
169 struct ceph_mon_request_header monhdr;
170 struct ceph_fsid fsid;
171 __le32 pool;
172 __le32 op;
173 __le64 auid;
174 __le64 snapid;
175 __le32 name_len;
176} __attribute__ ((packed));
177
178struct ceph_mon_poolop_reply {
179 struct ceph_mon_request_header monhdr;
180 struct ceph_fsid fsid;
181 __le32 reply_code;
182 __le32 epoch;
183 char has_data;
184 char data[0];
185} __attribute__ ((packed));
186
187struct ceph_mon_unmanaged_snap {
188 __le64 snapid;
189} __attribute__ ((packed));
190
191struct ceph_osd_getmap {
192 struct ceph_mon_request_header monhdr;
193 struct ceph_fsid fsid;
194 __le32 start;
195} __attribute__ ((packed));
196
197struct ceph_mds_getmap {
198 struct ceph_mon_request_header monhdr;
199 struct ceph_fsid fsid;
200} __attribute__ ((packed));
201
202struct ceph_client_mount {
203 struct ceph_mon_request_header monhdr;
204} __attribute__ ((packed));
205
206struct ceph_mon_subscribe_item {
207 __le64 have_version; __le64 have;
208 __u8 onetime;
209} __attribute__ ((packed));
210
211struct ceph_mon_subscribe_ack {
212 __le32 duration; /* seconds */
213 struct ceph_fsid fsid;
214} __attribute__ ((packed));
215
216/*
217 * mds states
218 * > 0 -> in
219 * <= 0 -> out
220 */
221#define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */
222#define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees.
223 empty log. */
224#define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */
225#define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */
226#define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */
227#define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */
228#define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */
229
230#define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */
231#define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed
232 operations (import, rename, etc.) */
233#define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */
234#define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */
235#define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */
236#define CEPH_MDS_STATE_ACTIVE 13 /* up, active */
237#define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */
238
239extern const char *ceph_mds_state_name(int s);
240
241
242/*
243 * metadata lock types.
244 * - these are bitmasks.. we can compose them
245 * - they also define the lock ordering by the MDS
246 * - a few of these are internal to the mds
247 */
248#define CEPH_LOCK_DVERSION 1
249#define CEPH_LOCK_DN 2
250#define CEPH_LOCK_ISNAP 16
251#define CEPH_LOCK_IVERSION 32 /* mds internal */
252#define CEPH_LOCK_IFILE 64
253#define CEPH_LOCK_IAUTH 128
254#define CEPH_LOCK_ILINK 256
255#define CEPH_LOCK_IDFT 512 /* dir frag tree */
256#define CEPH_LOCK_INEST 1024 /* mds internal */
257#define CEPH_LOCK_IXATTR 2048
258#define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */
259#define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */
260
261/* client_session ops */
262enum {
263 CEPH_SESSION_REQUEST_OPEN,
264 CEPH_SESSION_OPEN,
265 CEPH_SESSION_REQUEST_CLOSE,
266 CEPH_SESSION_CLOSE,
267 CEPH_SESSION_REQUEST_RENEWCAPS,
268 CEPH_SESSION_RENEWCAPS,
269 CEPH_SESSION_STALE,
270 CEPH_SESSION_RECALL_STATE,
271};
272
273extern const char *ceph_session_op_name(int op);
274
275struct ceph_mds_session_head {
276 __le32 op;
277 __le64 seq;
278 struct ceph_timespec stamp;
279 __le32 max_caps, max_leases;
280} __attribute__ ((packed));
281
282/* client_request */
283/*
284 * metadata ops.
285 * & 0x001000 -> write op
286 * & 0x010000 -> follow symlink (e.g. stat(), not lstat()).
 287 * & 0x100000 -> use weird ino/path trace
288 */
289#define CEPH_MDS_OP_WRITE 0x001000
290enum {
291 CEPH_MDS_OP_LOOKUP = 0x00100,
292 CEPH_MDS_OP_GETATTR = 0x00101,
293 CEPH_MDS_OP_LOOKUPHASH = 0x00102,
294 CEPH_MDS_OP_LOOKUPPARENT = 0x00103,
295
296 CEPH_MDS_OP_SETXATTR = 0x01105,
297 CEPH_MDS_OP_RMXATTR = 0x01106,
298 CEPH_MDS_OP_SETLAYOUT = 0x01107,
299 CEPH_MDS_OP_SETATTR = 0x01108,
300 CEPH_MDS_OP_SETFILELOCK= 0x01109,
301 CEPH_MDS_OP_GETFILELOCK= 0x00110,
302
303 CEPH_MDS_OP_MKNOD = 0x01201,
304 CEPH_MDS_OP_LINK = 0x01202,
305 CEPH_MDS_OP_UNLINK = 0x01203,
306 CEPH_MDS_OP_RENAME = 0x01204,
307 CEPH_MDS_OP_MKDIR = 0x01220,
308 CEPH_MDS_OP_RMDIR = 0x01221,
309 CEPH_MDS_OP_SYMLINK = 0x01222,
310
311 CEPH_MDS_OP_CREATE = 0x01301,
312 CEPH_MDS_OP_OPEN = 0x00302,
313 CEPH_MDS_OP_READDIR = 0x00305,
314
315 CEPH_MDS_OP_LOOKUPSNAP = 0x00400,
316 CEPH_MDS_OP_MKSNAP = 0x01400,
317 CEPH_MDS_OP_RMSNAP = 0x01401,
318 CEPH_MDS_OP_LSSNAP = 0x00402,
319};
320
321extern const char *ceph_mds_op_name(int op);
322
323
324#define CEPH_SETATTR_MODE 1
325#define CEPH_SETATTR_UID 2
326#define CEPH_SETATTR_GID 4
327#define CEPH_SETATTR_MTIME 8
328#define CEPH_SETATTR_ATIME 16
329#define CEPH_SETATTR_SIZE 32
330#define CEPH_SETATTR_CTIME 64
331
332union ceph_mds_request_args {
333 struct {
334 __le32 mask; /* CEPH_CAP_* */
335 } __attribute__ ((packed)) getattr;
336 struct {
337 __le32 mode;
338 __le32 uid;
339 __le32 gid;
340 struct ceph_timespec mtime;
341 struct ceph_timespec atime;
342 __le64 size, old_size; /* old_size needed by truncate */
343 __le32 mask; /* CEPH_SETATTR_* */
344 } __attribute__ ((packed)) setattr;
345 struct {
346 __le32 frag; /* which dir fragment */
347 __le32 max_entries; /* how many dentries to grab */
348 __le32 max_bytes;
349 } __attribute__ ((packed)) readdir;
350 struct {
351 __le32 mode;
352 __le32 rdev;
353 } __attribute__ ((packed)) mknod;
354 struct {
355 __le32 mode;
356 } __attribute__ ((packed)) mkdir;
357 struct {
358 __le32 flags;
359 __le32 mode;
360 __le32 stripe_unit; /* layout for newly created file */
361 __le32 stripe_count; /* ... */
362 __le32 object_size;
363 __le32 file_replication;
364 __le32 preferred;
365 } __attribute__ ((packed)) open;
366 struct {
367 __le32 flags;
368 } __attribute__ ((packed)) setxattr;
369 struct {
370 struct ceph_file_layout layout;
371 } __attribute__ ((packed)) setlayout;
372 struct {
373 __u8 rule; /* currently fcntl or flock */
374 __u8 type; /* shared, exclusive, remove*/
375 __le64 pid; /* process id requesting the lock */
376 __le64 pid_namespace;
377 __le64 start; /* initial location to lock */
378 __le64 length; /* num bytes to lock from start */
379 __u8 wait; /* will caller wait for lock to become available? */
380 } __attribute__ ((packed)) filelock_change;
381} __attribute__ ((packed));
382
383#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
384#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
385
386struct ceph_mds_request_head {
387 __le64 oldest_client_tid;
388 __le32 mdsmap_epoch; /* on client */
389 __le32 flags; /* CEPH_MDS_FLAG_* */
390 __u8 num_retry, num_fwd; /* count retry, fwd attempts */
391 __le16 num_releases; /* # include cap/lease release records */
392 __le32 op; /* mds op code */
393 __le32 caller_uid, caller_gid;
394 __le64 ino; /* use this ino for openc, mkdir, mknod,
395 etc. (if replaying) */
396 union ceph_mds_request_args args;
397} __attribute__ ((packed));
398
399/* cap/lease release record */
400struct ceph_mds_request_release {
401 __le64 ino, cap_id; /* ino and unique cap id */
402 __le32 caps, wanted; /* new issued, wanted */
403 __le32 seq, issue_seq, mseq;
404 __le32 dname_seq; /* if releasing a dentry lease, a */
405 __le32 dname_len; /* string follows. */
406} __attribute__ ((packed));
407
408/* client reply */
409struct ceph_mds_reply_head {
410 __le32 op;
411 __le32 result;
412 __le32 mdsmap_epoch;
413 __u8 safe; /* true if committed to disk */
414 __u8 is_dentry, is_target; /* true if dentry, target inode records
415 are included with reply */
416} __attribute__ ((packed));
417
418/* one for each node split */
419struct ceph_frag_tree_split {
420 __le32 frag; /* this frag splits... */
421 __le32 by; /* ...by this many bits */
422} __attribute__ ((packed));
423
424struct ceph_frag_tree_head {
425 __le32 nsplits; /* num ceph_frag_tree_split records */
426 struct ceph_frag_tree_split splits[];
427} __attribute__ ((packed));
428
429/* capability issue, for bundling with mds reply */
430struct ceph_mds_reply_cap {
431 __le32 caps, wanted; /* caps issued, wanted */
432 __le64 cap_id;
433 __le32 seq, mseq;
434 __le64 realm; /* snap realm */
435 __u8 flags; /* CEPH_CAP_FLAG_* */
436} __attribute__ ((packed));
437
438#define CEPH_CAP_FLAG_AUTH 1 /* cap is issued by auth mds */
439
440/* inode record, for bundling with mds reply */
441struct ceph_mds_reply_inode {
442 __le64 ino;
443 __le64 snapid;
444 __le32 rdev;
445 __le64 version; /* inode version */
446 __le64 xattr_version; /* version for xattr blob */
447 struct ceph_mds_reply_cap cap; /* caps issued for this inode */
448 struct ceph_file_layout layout;
449 struct ceph_timespec ctime, mtime, atime;
450 __le32 time_warp_seq;
451 __le64 size, max_size, truncate_size;
452 __le32 truncate_seq;
453 __le32 mode, uid, gid;
454 __le32 nlink;
455 __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */
456 struct ceph_timespec rctime;
457 struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */
458} __attribute__ ((packed));
459/* followed by frag array, then symlink string, then xattr blob */
460
461/* reply_lease follows dname, and reply_inode */
462struct ceph_mds_reply_lease {
463 __le16 mask; /* lease type(s) */
464 __le32 duration_ms; /* lease duration */
465 __le32 seq;
466} __attribute__ ((packed));
467
468struct ceph_mds_reply_dirfrag {
469 __le32 frag; /* fragment */
470 __le32 auth; /* auth mds, if this is a delegation point */
471 __le32 ndist; /* number of mds' this is replicated on */
472 __le32 dist[];
473} __attribute__ ((packed));
474
475#define CEPH_LOCK_FCNTL 1
476#define CEPH_LOCK_FLOCK 2
477
478#define CEPH_LOCK_SHARED 1
479#define CEPH_LOCK_EXCL 2
480#define CEPH_LOCK_UNLOCK 4
481
482struct ceph_filelock {
483 __le64 start;/* file offset to start lock at */
484 __le64 length; /* num bytes to lock; 0 for all following start */
485 __le64 client; /* which client holds the lock */
486 __le64 pid; /* process id holding the lock on the client */
487 __le64 pid_namespace;
488 __u8 type; /* shared lock, exclusive lock, or unlock */
489} __attribute__ ((packed));
490
491
492/* file access modes */
493#define CEPH_FILE_MODE_PIN 0
494#define CEPH_FILE_MODE_RD 1
495#define CEPH_FILE_MODE_WR 2
496#define CEPH_FILE_MODE_RDWR 3 /* RD | WR */
497#define CEPH_FILE_MODE_LAZY 4 /* lazy io */
 498#define CEPH_FILE_MODE_NUM 8 /* because these are bit fields, mostly */
499
500int ceph_flags_to_mode(int flags);
501
502
503/* capability bits */
504#define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */
505
506/* generic cap bits */
 507#define CEPH_CAP_GSHARED 1 /* client can read */
508#define CEPH_CAP_GEXCL 2 /* client can read and update */
509#define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */
510#define CEPH_CAP_GRD 8 /* (file) client can read */
511#define CEPH_CAP_GWR 16 /* (file) client can write */
512#define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */
513#define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */
514#define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */
515
516/* per-lock shift */
517#define CEPH_CAP_SAUTH 2
518#define CEPH_CAP_SLINK 4
519#define CEPH_CAP_SXATTR 6
520#define CEPH_CAP_SFILE 8
521#define CEPH_CAP_SFLOCK 20
522
523#define CEPH_CAP_BITS 22
524
525/* composed values */
526#define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH)
527#define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH)
528#define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK)
529#define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK)
530#define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR)
531#define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR)
532#define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE)
533#define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE)
534#define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE)
535#define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE)
536#define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE)
537#define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE)
538#define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE)
539#define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE)
540#define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE)
541#define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK)
542#define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK)
543
544
545/* cap masks (for getattr) */
546#define CEPH_STAT_CAP_INODE CEPH_CAP_PIN
547#define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */
548#define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN
549#define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED
550#define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED
551#define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED
552#define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED
553#define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED
554#define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED
555#define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED
556#define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */
557#define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED
558#define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \
559 CEPH_CAP_AUTH_SHARED | \
560 CEPH_CAP_LINK_SHARED | \
561 CEPH_CAP_FILE_SHARED | \
562 CEPH_CAP_XATTR_SHARED)
563
564#define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \
565 CEPH_CAP_LINK_SHARED | \
566 CEPH_CAP_XATTR_SHARED | \
567 CEPH_CAP_FILE_SHARED)
568#define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \
569 CEPH_CAP_FILE_CACHE)
570
571#define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \
572 CEPH_CAP_LINK_EXCL | \
573 CEPH_CAP_XATTR_EXCL | \
574 CEPH_CAP_FILE_EXCL)
575#define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \
576 CEPH_CAP_FILE_EXCL)
577#define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR)
578#define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \
579 CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \
580 CEPH_CAP_PIN)
581
582#define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \
583 CEPH_LOCK_IXATTR)
584
585int ceph_caps_for_mode(int mode);
586
587enum {
588 CEPH_CAP_OP_GRANT, /* mds->client grant */
589 CEPH_CAP_OP_REVOKE, /* mds->client revoke */
590 CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */
591 CEPH_CAP_OP_EXPORT, /* mds has exported the cap */
592 CEPH_CAP_OP_IMPORT, /* mds has imported the cap */
593 CEPH_CAP_OP_UPDATE, /* client->mds update */
594 CEPH_CAP_OP_DROP, /* client->mds drop cap bits */
595 CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */
596 CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */
597 CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */
598 CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */
599 CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */
600 CEPH_CAP_OP_RENEW, /* client->mds renewal request */
601};
602
603extern const char *ceph_cap_op_name(int op);
604
605/*
606 * caps message, used for capability callbacks, acks, requests, etc.
607 */
608struct ceph_mds_caps {
609 __le32 op; /* CEPH_CAP_OP_* */
610 __le64 ino, realm;
611 __le64 cap_id;
612 __le32 seq, issue_seq;
613 __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */
614 __le32 migrate_seq;
615 __le64 snap_follows;
616 __le32 snap_trace_len;
617
618 /* authlock */
619 __le32 uid, gid, mode;
620
621 /* linklock */
622 __le32 nlink;
623
624 /* xattrlock */
625 __le32 xattr_len;
626 __le64 xattr_version;
627
628 /* filelock */
629 __le64 size, max_size, truncate_size;
630 __le32 truncate_seq;
631 struct ceph_timespec mtime, atime, ctime;
632 struct ceph_file_layout layout;
633 __le32 time_warp_seq;
634} __attribute__ ((packed));
635
636/* cap release msg head */
637struct ceph_mds_cap_release {
638 __le32 num; /* number of cap_items that follow */
639} __attribute__ ((packed));
640
641struct ceph_mds_cap_item {
642 __le64 ino;
643 __le64 cap_id;
644 __le32 migrate_seq, seq;
645} __attribute__ ((packed));
646
647#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */
648#define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */
649#define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */
650#define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */
651
652extern const char *ceph_lease_op_name(int o);
653
654/* lease msg header */
655struct ceph_mds_lease {
656 __u8 action; /* CEPH_MDS_LEASE_* */
657 __le16 mask; /* which lease */
658 __le64 ino;
659 __le64 first, last; /* snap range */
660 __le32 seq;
661 __le32 duration_ms; /* duration of renewal */
662} __attribute__ ((packed));
663/* followed by a __le32+string for dname */
664
665/* client reconnect */
666struct ceph_mds_cap_reconnect {
667 __le64 cap_id;
668 __le32 wanted;
669 __le32 issued;
670 __le64 snaprealm;
671 __le64 pathbase; /* base ino for our path to this ino */
672 __le32 flock_len; /* size of flock state blob, if any */
673} __attribute__ ((packed));
674/* followed by flock blob */
675
676struct ceph_mds_cap_reconnect_v1 {
677 __le64 cap_id;
678 __le32 wanted;
679 __le32 issued;
680 __le64 size;
681 struct ceph_timespec mtime, atime;
682 __le64 snaprealm;
683 __le64 pathbase; /* base ino for our path to this ino */
684} __attribute__ ((packed));
685
686struct ceph_mds_snaprealm_reconnect {
687 __le64 ino; /* snap realm base */
688 __le64 seq; /* snap seq for this snap realm */
689 __le64 parent; /* parent realm */
690} __attribute__ ((packed));
691
692/*
693 * snaps
694 */
695enum {
696 CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */
697 CEPH_SNAP_OP_CREATE,
698 CEPH_SNAP_OP_DESTROY,
699 CEPH_SNAP_OP_SPLIT,
700};
701
702extern const char *ceph_snap_op_name(int o);
703
704/* snap msg header */
705struct ceph_mds_snap_head {
706 __le32 op; /* CEPH_SNAP_OP_* */
707 __le64 split; /* ino to split off, if any */
708 __le32 num_split_inos; /* # inos belonging to new child realm */
 709 __le32 num_split_realms; /* # child realms under new child realm */
710 __le32 trace_len; /* size of snap trace blob */
711} __attribute__ ((packed));
712/* followed by split ino list, then split realms, then the trace blob */
713
714/*
715 * encode info about a snaprealm, as viewed by a client
716 */
717struct ceph_mds_snap_realm {
718 __le64 ino; /* ino */
719 __le64 created; /* snap: when created */
720 __le64 parent; /* ino: parent realm */
721 __le64 parent_since; /* snap: same parent since */
722 __le64 seq; /* snap: version */
723 __le32 num_snaps;
724 __le32 num_prior_parent_snaps;
725} __attribute__ ((packed));
726/* followed by my snap list, then prior parent snap list */
727
728#endif
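
Numerically, the composed capability defines above shift the generic bits into per-lock windows; for example:

	CEPH_CAP_AUTH_SHARED	= 1  << 2	= 0x000004
	CEPH_CAP_FILE_SHARED	= 1  << 8	= 0x000100
	CEPH_CAP_FILE_RD	= 8  << 8	= 0x000800
	CEPH_CAP_FILE_WR	= 16 << 8	= 0x001000
	CEPH_CAP_FLOCK_SHARED	= 1  << 20	= 0x100000

so "file read + read cache" is CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE = 0xc00, and the whole space fits within CEPH_CAP_BITS (22) bits.
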
diff --git a/fs/ceph/ceph_hash.c b/fs/ceph/ceph_hash.c
deleted file mode 100644
index bd570015d147..000000000000
--- a/fs/ceph/ceph_hash.c
+++ /dev/null
@@ -1,118 +0,0 @@
1
2#include "types.h"
3
4/*
 5 * Robert Jenkins' hash function.
6 * http://burtleburtle.net/bob/hash/evahash.html
7 * This is in the public domain.
8 */
9#define mix(a, b, c) \
10 do { \
11 a = a - b; a = a - c; a = a ^ (c >> 13); \
12 b = b - c; b = b - a; b = b ^ (a << 8); \
13 c = c - a; c = c - b; c = c ^ (b >> 13); \
14 a = a - b; a = a - c; a = a ^ (c >> 12); \
15 b = b - c; b = b - a; b = b ^ (a << 16); \
16 c = c - a; c = c - b; c = c ^ (b >> 5); \
17 a = a - b; a = a - c; a = a ^ (c >> 3); \
18 b = b - c; b = b - a; b = b ^ (a << 10); \
19 c = c - a; c = c - b; c = c ^ (b >> 15); \
20 } while (0)
21
22unsigned ceph_str_hash_rjenkins(const char *str, unsigned length)
23{
24 const unsigned char *k = (const unsigned char *)str;
25 __u32 a, b, c; /* the internal state */
26 __u32 len; /* how many key bytes still need mixing */
27
28 /* Set up the internal state */
29 len = length;
30 a = 0x9e3779b9; /* the golden ratio; an arbitrary value */
31 b = a;
32 c = 0; /* variable initialization of internal state */
33
34 /* handle most of the key */
35 while (len >= 12) {
36 a = a + (k[0] + ((__u32)k[1] << 8) + ((__u32)k[2] << 16) +
37 ((__u32)k[3] << 24));
38 b = b + (k[4] + ((__u32)k[5] << 8) + ((__u32)k[6] << 16) +
39 ((__u32)k[7] << 24));
40 c = c + (k[8] + ((__u32)k[9] << 8) + ((__u32)k[10] << 16) +
41 ((__u32)k[11] << 24));
42 mix(a, b, c);
43 k = k + 12;
44 len = len - 12;
45 }
46
47 /* handle the last 11 bytes */
48 c = c + length;
49 switch (len) { /* all the case statements fall through */
50 case 11:
51 c = c + ((__u32)k[10] << 24);
52 case 10:
53 c = c + ((__u32)k[9] << 16);
54 case 9:
55 c = c + ((__u32)k[8] << 8);
56 /* the first byte of c is reserved for the length */
57 case 8:
58 b = b + ((__u32)k[7] << 24);
59 case 7:
60 b = b + ((__u32)k[6] << 16);
61 case 6:
62 b = b + ((__u32)k[5] << 8);
63 case 5:
64 b = b + k[4];
65 case 4:
66 a = a + ((__u32)k[3] << 24);
67 case 3:
68 a = a + ((__u32)k[2] << 16);
69 case 2:
70 a = a + ((__u32)k[1] << 8);
71 case 1:
72 a = a + k[0];
73 /* case 0: nothing left to add */
74 }
75 mix(a, b, c);
76
77 return c;
78}
79
80/*
81 * linux dcache hash
82 */
83unsigned ceph_str_hash_linux(const char *str, unsigned length)
84{
85 unsigned long hash = 0;
86 unsigned char c;
87
88 while (length--) {
89 c = *str++;
90 hash = (hash + (c << 4) + (c >> 4)) * 11;
91 }
92 return hash;
93}
94
95
96unsigned ceph_str_hash(int type, const char *s, unsigned len)
97{
98 switch (type) {
99 case CEPH_STR_HASH_LINUX:
100 return ceph_str_hash_linux(s, len);
101 case CEPH_STR_HASH_RJENKINS:
102 return ceph_str_hash_rjenkins(s, len);
103 default:
104 return -1;
105 }
106}
107
108const char *ceph_str_hash_name(int type)
109{
110 switch (type) {
111 case CEPH_STR_HASH_LINUX:
112 return "linux";
113 case CEPH_STR_HASH_RJENKINS:
114 return "rjenkins";
115 default:
116 return "unknown";
117 }
118}
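
A hypothetical caller of the dispatch function above, hashing a dentry name with each algorithm:

static void hash_example(void)
{
	const char *name = "example";
	unsigned h1 = ceph_str_hash(CEPH_STR_HASH_LINUX, name, 7);
	unsigned h2 = ceph_str_hash(CEPH_STR_HASH_RJENKINS, name, 7);
	/* unknown types hit the default case and yield (unsigned)-1 */
	unsigned h3 = ceph_str_hash(0x7f, name, 7);
}
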
diff --git a/fs/ceph/ceph_hash.h b/fs/ceph/ceph_hash.h
deleted file mode 100644
index d099c3f90236..000000000000
--- a/fs/ceph/ceph_hash.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef FS_CEPH_HASH_H
2#define FS_CEPH_HASH_H
3
4#define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */
5#define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */
6
7extern unsigned ceph_str_hash_linux(const char *s, unsigned len);
8extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len);
9
10extern unsigned ceph_str_hash(int type, const char *s, unsigned len);
11extern const char *ceph_str_hash_name(int type);
12
13#endif
diff --git a/fs/ceph/crush/crush.c b/fs/ceph/crush/crush.c
deleted file mode 100644
index fabd302e5779..000000000000
--- a/fs/ceph/crush/crush.c
+++ /dev/null
@@ -1,151 +0,0 @@
1
2#ifdef __KERNEL__
3# include <linux/slab.h>
4#else
5# include <stdlib.h>
6# include <assert.h>
7# define kfree(x) do { if (x) free(x); } while (0)
8# define BUG_ON(x) assert(!(x))
9#endif
10
11#include "crush.h"
12
13const char *crush_bucket_alg_name(int alg)
14{
15 switch (alg) {
16 case CRUSH_BUCKET_UNIFORM: return "uniform";
17 case CRUSH_BUCKET_LIST: return "list";
18 case CRUSH_BUCKET_TREE: return "tree";
19 case CRUSH_BUCKET_STRAW: return "straw";
20 default: return "unknown";
21 }
22}
23
24/**
25 * crush_get_bucket_item_weight - Get weight of an item in given bucket
26 * @b: bucket pointer
27 * @p: item index in bucket
28 */
29int crush_get_bucket_item_weight(struct crush_bucket *b, int p)
30{
31 if (p >= b->size)
32 return 0;
33
34 switch (b->alg) {
35 case CRUSH_BUCKET_UNIFORM:
36 return ((struct crush_bucket_uniform *)b)->item_weight;
37 case CRUSH_BUCKET_LIST:
38 return ((struct crush_bucket_list *)b)->item_weights[p];
39 case CRUSH_BUCKET_TREE:
40 if (p & 1)
41 return ((struct crush_bucket_tree *)b)->node_weights[p];
42 return 0;
43 case CRUSH_BUCKET_STRAW:
44 return ((struct crush_bucket_straw *)b)->item_weights[p];
45 }
46 return 0;
47}
48
49/**
50 * crush_calc_parents - Calculate parent vectors for the given crush map.
51 * @map: crush_map pointer
52 */
53void crush_calc_parents(struct crush_map *map)
54{
55 int i, b, c;
56
57 for (b = 0; b < map->max_buckets; b++) {
58 if (map->buckets[b] == NULL)
59 continue;
60 for (i = 0; i < map->buckets[b]->size; i++) {
61 c = map->buckets[b]->items[i];
62 BUG_ON(c >= map->max_devices ||
63 c < -map->max_buckets);
64 if (c >= 0)
65 map->device_parents[c] = map->buckets[b]->id;
66 else
67 map->bucket_parents[-1-c] = map->buckets[b]->id;
68 }
69 }
70}
71
72void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b)
73{
74 kfree(b->h.perm);
75 kfree(b->h.items);
76 kfree(b);
77}
78
79void crush_destroy_bucket_list(struct crush_bucket_list *b)
80{
81 kfree(b->item_weights);
82 kfree(b->sum_weights);
83 kfree(b->h.perm);
84 kfree(b->h.items);
85 kfree(b);
86}
87
88void crush_destroy_bucket_tree(struct crush_bucket_tree *b)
89{
90 kfree(b->node_weights);
91 kfree(b);
92}
93
94void crush_destroy_bucket_straw(struct crush_bucket_straw *b)
95{
96 kfree(b->straws);
97 kfree(b->item_weights);
98 kfree(b->h.perm);
99 kfree(b->h.items);
100 kfree(b);
101}
102
103void crush_destroy_bucket(struct crush_bucket *b)
104{
105 switch (b->alg) {
106 case CRUSH_BUCKET_UNIFORM:
107 crush_destroy_bucket_uniform((struct crush_bucket_uniform *)b);
108 break;
109 case CRUSH_BUCKET_LIST:
110 crush_destroy_bucket_list((struct crush_bucket_list *)b);
111 break;
112 case CRUSH_BUCKET_TREE:
113 crush_destroy_bucket_tree((struct crush_bucket_tree *)b);
114 break;
115 case CRUSH_BUCKET_STRAW:
116 crush_destroy_bucket_straw((struct crush_bucket_straw *)b);
117 break;
118 }
119}
120
121/**
122 * crush_destroy - Destroy a crush_map
123 * @map: crush_map pointer
124 */
125void crush_destroy(struct crush_map *map)
126{
127 int b;
128
129 /* buckets */
130 if (map->buckets) {
131 for (b = 0; b < map->max_buckets; b++) {
132 if (map->buckets[b] == NULL)
133 continue;
134 crush_destroy_bucket(map->buckets[b]);
135 }
136 kfree(map->buckets);
137 }
138
139 /* rules */
140 if (map->rules) {
141 for (b = 0; b < map->max_rules; b++)
142 kfree(map->rules[b]);
143 kfree(map->rules);
144 }
145
146 kfree(map->bucket_parents);
147 kfree(map->device_parents);
148 kfree(map);
149}
150
151
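One subtlety crush_calc_parents() above depends on: item ids encode the item type. Devices are non-negative; buckets are negative, with bucket id -N stored at index N-1 of the bucket arrays:

	item c >= 0  ->  device; parent recorded in device_parents[c]
	item c <  0  ->  bucket; parent recorded in bucket_parents[-1-c]

e.g. a child bucket with id -3 lands at bucket_parents[2].
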
diff --git a/fs/ceph/crush/crush.h b/fs/ceph/crush/crush.h
deleted file mode 100644
index 97e435b191f4..000000000000
--- a/fs/ceph/crush/crush.h
+++ /dev/null
@@ -1,180 +0,0 @@
1#ifndef CEPH_CRUSH_CRUSH_H
2#define CEPH_CRUSH_CRUSH_H
3
4#include <linux/types.h>
5
6/*
7 * CRUSH is a pseudo-random data distribution algorithm that
8 * efficiently distributes input values (typically, data objects)
9 * across a heterogeneous, structured storage cluster.
10 *
11 * The algorithm was originally described in detail in this paper
12 * (although the algorithm has evolved somewhat since then):
13 *
14 * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
15 *
16 * LGPL2
17 */
18
19
20#define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */
21
22
23#define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */
24#define CRUSH_MAX_SET 10 /* max size of a mapping result */
25
26
27/*
28 * CRUSH uses user-defined "rules" to describe how inputs should be
29 * mapped to devices. A rule consists of sequence of steps to perform
30 * to generate the set of output devices.
31 */
32struct crush_rule_step {
33 __u32 op;
34 __s32 arg1;
35 __s32 arg2;
36};
37
38/* step op codes */
39enum {
40 CRUSH_RULE_NOOP = 0,
41 CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */
42 CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */
43 /* arg2 = type */
44 CRUSH_RULE_CHOOSE_INDEP = 3, /* same */
45 CRUSH_RULE_EMIT = 4, /* no args */
46 CRUSH_RULE_CHOOSE_LEAF_FIRSTN = 6,
47 CRUSH_RULE_CHOOSE_LEAF_INDEP = 7,
48};
49
50/*
51 * for specifying choose num (arg1) relative to the max parameter
52 * passed to do_rule
53 */
54#define CRUSH_CHOOSE_N 0
55#define CRUSH_CHOOSE_N_MINUS(x) (-(x))
56
57/*
58 * The rule mask is used to describe what the rule is intended for.
59 * Given a ruleset and size of output set, we search through the
60 * rule list for a matching rule_mask.
61 */
62struct crush_rule_mask {
63 __u8 ruleset;
64 __u8 type;
65 __u8 min_size;
66 __u8 max_size;
67};
68
69struct crush_rule {
70 __u32 len;
71 struct crush_rule_mask mask;
72 struct crush_rule_step steps[0];
73};
74
75#define crush_rule_size(len) (sizeof(struct crush_rule) + \
76 (len)*sizeof(struct crush_rule_step))
77
78
79
80/*
81 * A bucket is a named container of other items (either devices or
82 * other buckets). Items within a bucket are chosen using one of a
83 * few different algorithms. The table summarizes how the speed of
84 * each option measures up against mapping stability when items are
85 * added or removed.
86 *
87 * Bucket Alg Speed Additions Removals
88 * ------------------------------------------------
89 * uniform O(1) poor poor
90 * list O(n) optimal poor
91 * tree O(log n) good good
92 * straw O(n) optimal optimal
93 */
94enum {
95 CRUSH_BUCKET_UNIFORM = 1,
96 CRUSH_BUCKET_LIST = 2,
97 CRUSH_BUCKET_TREE = 3,
98 CRUSH_BUCKET_STRAW = 4
99};
100extern const char *crush_bucket_alg_name(int alg);
101
102struct crush_bucket {
103 __s32 id; /* this'll be negative */
104 __u16 type; /* non-zero; type=0 is reserved for devices */
105 __u8 alg; /* one of CRUSH_BUCKET_* */
106 __u8 hash; /* which hash function to use, CRUSH_HASH_* */
107 __u32 weight; /* 16-bit fixed point */
108 __u32 size; /* num items */
109 __s32 *items;
110
111 /*
112 * cached random permutation: used for uniform bucket and for
113 * the linear search fallback for the other bucket types.
114 */
115 __u32 perm_x; /* @x for which *perm is defined */
116 __u32 perm_n; /* num elements of *perm that are permuted/defined */
117 __u32 *perm;
118};
119
120struct crush_bucket_uniform {
121 struct crush_bucket h;
122 __u32 item_weight; /* 16-bit fixed point; all items equally weighted */
123};
124
125struct crush_bucket_list {
126 struct crush_bucket h;
127 __u32 *item_weights; /* 16-bit fixed point */
128 __u32 *sum_weights; /* 16-bit fixed point. element i is sum
129 of weights 0..i, inclusive */
130};
131
132struct crush_bucket_tree {
133 struct crush_bucket h; /* note: h.size is _tree_ size, not number of
134 actual items */
135 __u8 num_nodes;
136 __u32 *node_weights;
137};
138
139struct crush_bucket_straw {
140 struct crush_bucket h;
141 __u32 *item_weights; /* 16-bit fixed point */
142 __u32 *straws; /* 16-bit fixed point */
143};
144
145
146
147/*
148 * CRUSH map includes all buckets, rules, etc.
149 */
150struct crush_map {
151 struct crush_bucket **buckets;
152 struct crush_rule **rules;
153
154 /*
 155 * Parent pointers to identify the parent bucket of a device or
 156 * bucket in the hierarchy. If an item appears more than
 157 * once, this is the _last_ time it appeared (where buckets
 158 * are processed in bucket id order, from -1 on down to
 159 * -max_buckets).
160 */
161 __u32 *bucket_parents;
162 __u32 *device_parents;
163
164 __s32 max_buckets;
165 __u32 max_rules;
166 __s32 max_devices;
167};
168
169
170/* crush.c */
171extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos);
172extern void crush_calc_parents(struct crush_map *map);
173extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b);
174extern void crush_destroy_bucket_list(struct crush_bucket_list *b);
175extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b);
176extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b);
177extern void crush_destroy_bucket(struct crush_bucket *b);
178extern void crush_destroy(struct crush_map *map);
179
180#endif
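
A sketch of allocating a rule with the flexible steps[] array via the crush_rule_size() macro above; userspace-flavored for brevity, matching this code's dual kernel/userspace use, and alloc_rule is a hypothetical helper:

#include <stdlib.h>

struct crush_rule *alloc_rule(unsigned len)
{
	struct crush_rule *r = malloc(crush_rule_size(len));

	if (r)
		r->len = len;	/* number of crush_rule_step entries */
	return r;
}
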
diff --git a/fs/ceph/crush/hash.c b/fs/ceph/crush/hash.c
deleted file mode 100644
index 5873aed694bf..000000000000
--- a/fs/ceph/crush/hash.c
+++ /dev/null
@@ -1,149 +0,0 @@
1
2#include <linux/types.h>
3#include "hash.h"
4
5/*
6 * Robert Jenkins' function for mixing 32-bit values
7 * http://burtleburtle.net/bob/hash/evahash.html
8 * a, b = random bits, c = input and output
9 */
10#define crush_hashmix(a, b, c) do { \
11 a = a-b; a = a-c; a = a^(c>>13); \
12 b = b-c; b = b-a; b = b^(a<<8); \
13 c = c-a; c = c-b; c = c^(b>>13); \
14 a = a-b; a = a-c; a = a^(c>>12); \
15 b = b-c; b = b-a; b = b^(a<<16); \
16 c = c-a; c = c-b; c = c^(b>>5); \
17 a = a-b; a = a-c; a = a^(c>>3); \
18 b = b-c; b = b-a; b = b^(a<<10); \
19 c = c-a; c = c-b; c = c^(b>>15); \
20 } while (0)
21
22#define crush_hash_seed 1315423911
23
24static __u32 crush_hash32_rjenkins1(__u32 a)
25{
26 __u32 hash = crush_hash_seed ^ a;
27 __u32 b = a;
28 __u32 x = 231232;
29 __u32 y = 1232;
30 crush_hashmix(b, x, hash);
31 crush_hashmix(y, a, hash);
32 return hash;
33}
34
35static __u32 crush_hash32_rjenkins1_2(__u32 a, __u32 b)
36{
37 __u32 hash = crush_hash_seed ^ a ^ b;
38 __u32 x = 231232;
39 __u32 y = 1232;
40 crush_hashmix(a, b, hash);
41 crush_hashmix(x, a, hash);
42 crush_hashmix(b, y, hash);
43 return hash;
44}
45
46static __u32 crush_hash32_rjenkins1_3(__u32 a, __u32 b, __u32 c)
47{
48 __u32 hash = crush_hash_seed ^ a ^ b ^ c;
49 __u32 x = 231232;
50 __u32 y = 1232;
51 crush_hashmix(a, b, hash);
52 crush_hashmix(c, x, hash);
53 crush_hashmix(y, a, hash);
54 crush_hashmix(b, x, hash);
55 crush_hashmix(y, c, hash);
56 return hash;
57}
58
59static __u32 crush_hash32_rjenkins1_4(__u32 a, __u32 b, __u32 c, __u32 d)
60{
61 __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d;
62 __u32 x = 231232;
63 __u32 y = 1232;
64 crush_hashmix(a, b, hash);
65 crush_hashmix(c, d, hash);
66 crush_hashmix(a, x, hash);
67 crush_hashmix(y, b, hash);
68 crush_hashmix(c, x, hash);
69 crush_hashmix(y, d, hash);
70 return hash;
71}
72
73static __u32 crush_hash32_rjenkins1_5(__u32 a, __u32 b, __u32 c, __u32 d,
74 __u32 e)
75{
76 __u32 hash = crush_hash_seed ^ a ^ b ^ c ^ d ^ e;
77 __u32 x = 231232;
78 __u32 y = 1232;
79 crush_hashmix(a, b, hash);
80 crush_hashmix(c, d, hash);
81 crush_hashmix(e, x, hash);
82 crush_hashmix(y, a, hash);
83 crush_hashmix(b, x, hash);
84 crush_hashmix(y, c, hash);
85 crush_hashmix(d, x, hash);
86 crush_hashmix(y, e, hash);
87 return hash;
88}
89
90
91__u32 crush_hash32(int type, __u32 a)
92{
93 switch (type) {
94 case CRUSH_HASH_RJENKINS1:
95 return crush_hash32_rjenkins1(a);
96 default:
97 return 0;
98 }
99}
100
101__u32 crush_hash32_2(int type, __u32 a, __u32 b)
102{
103 switch (type) {
104 case CRUSH_HASH_RJENKINS1:
105 return crush_hash32_rjenkins1_2(a, b);
106 default:
107 return 0;
108 }
109}
110
111__u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c)
112{
113 switch (type) {
114 case CRUSH_HASH_RJENKINS1:
115 return crush_hash32_rjenkins1_3(a, b, c);
116 default:
117 return 0;
118 }
119}
120
121__u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d)
122{
123 switch (type) {
124 case CRUSH_HASH_RJENKINS1:
125 return crush_hash32_rjenkins1_4(a, b, c, d);
126 default:
127 return 0;
128 }
129}
130
131__u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, __u32 e)
132{
133 switch (type) {
134 case CRUSH_HASH_RJENKINS1:
135 return crush_hash32_rjenkins1_5(a, b, c, d, e);
136 default:
137 return 0;
138 }
139}
140
141const char *crush_hash_name(int type)
142{
143 switch (type) {
144 case CRUSH_HASH_RJENKINS1:
145 return "rjenkins1";
146 default:
147 return "unknown";
148 }
149}
diff --git a/fs/ceph/crush/hash.h b/fs/ceph/crush/hash.h
deleted file mode 100644
index 91e884230d5d..000000000000
--- a/fs/ceph/crush/hash.h
+++ /dev/null
@@ -1,17 +0,0 @@
1#ifndef CEPH_CRUSH_HASH_H
2#define CEPH_CRUSH_HASH_H
3
4#define CRUSH_HASH_RJENKINS1 0
5
6#define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1
7
8extern const char *crush_hash_name(int type);
9
10extern __u32 crush_hash32(int type, __u32 a);
11extern __u32 crush_hash32_2(int type, __u32 a, __u32 b);
12extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c);
13extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d);
14extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d,
15 __u32 e);
16
17#endif
diff --git a/fs/ceph/crush/mapper.c b/fs/ceph/crush/mapper.c
deleted file mode 100644
index a4eec133258e..000000000000
--- a/fs/ceph/crush/mapper.c
+++ /dev/null
@@ -1,609 +0,0 @@
1
2#ifdef __KERNEL__
3# include <linux/string.h>
4# include <linux/slab.h>
5# include <linux/bug.h>
6# include <linux/kernel.h>
7# ifndef dprintk
8# define dprintk(args...)
9# endif
10#else
11# include <string.h>
12# include <stdio.h>
13# include <stdlib.h>
14# include <assert.h>
15# define BUG_ON(x) assert(!(x))
16# define dprintk(args...) /* printf(args) */
17# define kmalloc(x, f) malloc(x)
18# define kfree(x) free(x)
19#endif
20
21#include "crush.h"
22#include "hash.h"
23
24/*
25 * Implement the core CRUSH mapping algorithm.
26 */
27
28/**
29 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
30 * @map: the crush_map
31 * @ruleset: the storage ruleset id (user defined)
32 * @type: storage ruleset type (user defined)
33 * @size: output set size
34 */
35int crush_find_rule(struct crush_map *map, int ruleset, int type, int size)
36{
37 int i;
38
39 for (i = 0; i < map->max_rules; i++) {
40 if (map->rules[i] &&
41 map->rules[i]->mask.ruleset == ruleset &&
42 map->rules[i]->mask.type == type &&
43 map->rules[i]->mask.min_size <= size &&
44 map->rules[i]->mask.max_size >= size)
45 return i;
46 }
47 return -1;
48}
49
50
51/*
52 * bucket choose methods
53 *
54 * For each bucket algorithm, we have a "choose" method that, given a
55 * crush input @x and replica position (usually, position in output set) @r,
56 * will produce an item in the bucket.
57 */
58
59/*
60 * Choose based on a random permutation of the bucket.
61 *
62 * We used to use some prime number arithmetic to do this, but it
63 * wasn't very random, and had some other bad behaviors. Instead, we
64 * calculate an actual random permutation of the bucket members.
65 * Since this is expensive, we optimize for the r=0 case, which
66 * captures the vast majority of calls.
67 */
68static int bucket_perm_choose(struct crush_bucket *bucket,
69 int x, int r)
70{
71 unsigned pr = r % bucket->size;
72 unsigned i, s;
73
74 /* start a new permutation if @x has changed */
75 if (bucket->perm_x != x || bucket->perm_n == 0) {
76 dprintk("bucket %d new x=%d\n", bucket->id, x);
77 bucket->perm_x = x;
78
79 /* optimize common r=0 case */
80 if (pr == 0) {
81 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
82 bucket->size;
83 bucket->perm[0] = s;
84 bucket->perm_n = 0xffff; /* magic value, see below */
85 goto out;
86 }
87
88 for (i = 0; i < bucket->size; i++)
89 bucket->perm[i] = i;
90 bucket->perm_n = 0;
91 } else if (bucket->perm_n == 0xffff) {
92 /* clean up after the r=0 case above */
93 for (i = 1; i < bucket->size; i++)
94 bucket->perm[i] = i;
95 bucket->perm[bucket->perm[0]] = 0;
96 bucket->perm_n = 1;
97 }
98
99 /* calculate permutation up to pr */
100 for (i = 0; i < bucket->perm_n; i++)
101 dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
102 while (bucket->perm_n <= pr) {
103 unsigned p = bucket->perm_n;
104 /* no point in swapping the final entry */
105 if (p < bucket->size - 1) {
106 i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
107 (bucket->size - p);
108 if (i) {
109 unsigned t = bucket->perm[p + i];
110 bucket->perm[p + i] = bucket->perm[p];
111 bucket->perm[p] = t;
112 }
113 dprintk(" perm_choose swap %d with %d\n", p, p+i);
114 }
115 bucket->perm_n++;
116 }
117 for (i = 0; i < bucket->size; i++)
118 dprintk(" perm_choose %d: %d\n", i, bucket->perm[i]);
119
120 s = bucket->perm[pr];
121out:
122 dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
123 bucket->size, x, r, pr, s);
124 return bucket->items[s];
125}
126
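The loop above is a lazily evaluated Fisher-Yates shuffle: slots are only swapped into place up to the requested position pr, and the r=0 fast path fills in slot 0 alone. A minimal userspace sketch of the same idea, with a generic hash() standing in for crush_hash32_3 and all names purely illustrative:

        /* Extend a permutation of n items up to position pr and return
         * the element there.  perm[] starts as the identity permutation;
         * *perm_n records how many slots are already final. */
        static unsigned lazy_perm_choose(unsigned *perm, unsigned *perm_n,
                                         unsigned n, unsigned pr,
                                         unsigned (*hash)(unsigned), unsigned x)
        {
                while (*perm_n <= pr) {
                        unsigned p = (*perm_n)++;

                        if (p < n - 1) {
                                /* swap in a hashed pick from the unplaced tail */
                                unsigned i = hash(x ^ p) % (n - p);
                                unsigned t = perm[p + i];

                                perm[p + i] = perm[p];
                                perm[p] = t;
                        }
                }
                return perm[pr];
        }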
127/* uniform */
128static int bucket_uniform_choose(struct crush_bucket_uniform *bucket,
129 int x, int r)
130{
131 return bucket_perm_choose(&bucket->h, x, r);
132}
133
134/* list */
135static int bucket_list_choose(struct crush_bucket_list *bucket,
136 int x, int r)
137{
138 int i;
139
140 for (i = bucket->h.size-1; i >= 0; i--) {
141 		__u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
142 r, bucket->h.id);
143 w &= 0xffff;
144 dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
145 			"sw %x rand %llx\n",
146 i, x, r, bucket->h.items[i], bucket->item_weights[i],
147 bucket->sum_weights[i], w);
148 w *= bucket->sum_weights[i];
149 w = w >> 16;
150 /*dprintk(" scaled %llx\n", w);*/
151 if (w < bucket->item_weights[i])
152 return bucket->h.items[i];
153 }
154
155 BUG_ON(1);
156 return 0;
157}
158
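Scanning from the tail, each item gets a 16-bit draw scaled into its cumulative weight range: w & 0xffff is uniform over [0, 0xffff], so after multiplying by sum_weights[i] and shifting right by 16 the draw is uniform over [0, sum_weights[i]), and the item wins with probability item_weights[i] / sum_weights[i]. Chaining those conditional probabilities selects every item with probability proportional to its weight, and with consistent weights the BUG_ON is unreachable: at i = 0 the item's weight equals its cumulative sum, so the scaled draw always falls below it.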
159
160/* (binary) tree */
161static int height(int n)
162{
163 int h = 0;
164 while ((n & 1) == 0) {
165 h++;
166 n = n >> 1;
167 }
168 return h;
169}
170
171static int left(int x)
172{
173 int h = height(x);
174 return x - (1 << (h-1));
175}
176
177static int right(int x)
178{
179 int h = height(x);
180 return x + (1 << (h-1));
181}
182
183static int terminal(int x)
184{
185 return x & 1;
186}
187
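These three helpers walk a complete binary tree laid out flat in node_weights[]: a node's height is its count of trailing zero bits, leaves sit at the odd indices, and an interior node n has children n - 2^(h-1) and n + 2^(h-1). With num_nodes = 16, for example, the root is 16 >> 1 = 8, left(8) = 4, right(8) = 12, and the leaf at index 5 carries item 5 >> 1 = 2, which is why bucket_tree_choose below returns bucket->h.items[n >> 1].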
188static int bucket_tree_choose(struct crush_bucket_tree *bucket,
189 int x, int r)
190{
191 int n, l;
192 __u32 w;
193 __u64 t;
194
195 /* start at root */
196 n = bucket->num_nodes >> 1;
197
198 while (!terminal(n)) {
199 /* pick point in [0, w) */
200 w = bucket->node_weights[n];
201 t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
202 bucket->h.id) * (__u64)w;
203 t = t >> 32;
204
205 /* descend to the left or right? */
206 l = left(n);
207 if (t < bucket->node_weights[l])
208 n = l;
209 else
210 n = right(n);
211 }
212
213 return bucket->h.items[n >> 1];
214}
215
216
217/* straw */
218
219static int bucket_straw_choose(struct crush_bucket_straw *bucket,
220 int x, int r)
221{
222 int i;
223 int high = 0;
224 __u64 high_draw = 0;
225 __u64 draw;
226
227 for (i = 0; i < bucket->h.size; i++) {
228 draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
229 draw &= 0xffff;
230 draw *= bucket->straws[i];
231 if (i == 0 || draw > high_draw) {
232 high = i;
233 high_draw = draw;
234 }
235 }
236 return bucket->h.items[high];
237}
238
239static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
240{
241 dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
242 switch (in->alg) {
243 case CRUSH_BUCKET_UNIFORM:
244 return bucket_uniform_choose((struct crush_bucket_uniform *)in,
245 x, r);
246 case CRUSH_BUCKET_LIST:
247 return bucket_list_choose((struct crush_bucket_list *)in,
248 x, r);
249 case CRUSH_BUCKET_TREE:
250 return bucket_tree_choose((struct crush_bucket_tree *)in,
251 x, r);
252 case CRUSH_BUCKET_STRAW:
253 return bucket_straw_choose((struct crush_bucket_straw *)in,
254 x, r);
255 default:
256 BUG_ON(1);
257 return in->items[0];
258 }
259}
260
261/*
262 * true if device is marked "out" (failed, fully offloaded)
263 * of the cluster
264 */
265static int is_out(struct crush_map *map, __u32 *weight, int item, int x)
266{
267 if (weight[item] >= 0x10000)
268 return 0;
269 if (weight[item] == 0)
270 return 1;
271 if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
272 < weight[item])
273 return 0;
274 return 1;
275}
276
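Weights are 16.16 fixed point: 0x10000 means weight 1.0 (never out), 0 means fully out, and values in between reject the device probabilistically so a reweighted device sheds a matching fraction of its load. A sketch of the arithmetic under that convention:

        /* A device reweighted to 0.25 (weight 0x4000) keeps about a
         * quarter of its inputs: the low 16 bits of the hash are
         * uniform over [0, 0xffff], so this test passes with
         * probability 0x4000 / 0x10000 = 1/4. */
        u32 h16 = crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff;
        int still_in = (h16 < 0x4000);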
277/**
278 * crush_choose - choose numrep distinct items of given type
279 * @map: the crush_map
280 * @bucket: the bucket we are choosing an item from
281 * @x: crush input value
282 * @numrep: the number of items to choose
283 * @type: the type of item to choose
284 * @out: pointer to output vector
285 * @outpos: our position in that vector
286 * @firstn: true if choosing "first n" items, false if choosing "indep"
287 * @recurse_to_leaf: true if we want one device under each item of given type
288 * @out2: second output vector for leaf items (if @recurse_to_leaf)
289 */
290static int crush_choose(struct crush_map *map,
291 struct crush_bucket *bucket,
292 __u32 *weight,
293 int x, int numrep, int type,
294 int *out, int outpos,
295 int firstn, int recurse_to_leaf,
296 int *out2)
297{
298 int rep;
299 int ftotal, flocal;
300 int retry_descent, retry_bucket, skip_rep;
301 struct crush_bucket *in = bucket;
302 int r;
303 int i;
304 int item = 0;
305 int itemtype;
306 int collide, reject;
307 const int orig_tries = 5; /* attempts before we fall back to search */
308
309 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
310 bucket->id, x, outpos, numrep);
311
312 for (rep = outpos; rep < numrep; rep++) {
313 /* keep trying until we get a non-out, non-colliding item */
314 ftotal = 0;
315 skip_rep = 0;
316 do {
317 retry_descent = 0;
318 in = bucket; /* initial bucket */
319
320 /* choose through intervening buckets */
321 flocal = 0;
322 do {
323 collide = 0;
324 retry_bucket = 0;
325 r = rep;
326 if (in->alg == CRUSH_BUCKET_UNIFORM) {
327 /* be careful */
328 if (firstn || numrep >= in->size)
329 /* r' = r + f_total */
330 r += ftotal;
331 else if (in->size % numrep == 0)
332 /* r'=r+(n+1)*f_local */
333 r += (numrep+1) *
334 (flocal+ftotal);
335 else
336 /* r' = r + n*f_local */
337 r += numrep * (flocal+ftotal);
338 } else {
339 if (firstn)
340 /* r' = r + f_total */
341 r += ftotal;
342 else
343 /* r' = r + n*f_local */
344 r += numrep * (flocal+ftotal);
345 }
346
347 /* bucket choose */
348 if (in->size == 0) {
349 reject = 1;
350 goto reject;
351 }
352 if (flocal >= (in->size>>1) &&
353 flocal > orig_tries)
354 item = bucket_perm_choose(in, x, r);
355 else
356 item = crush_bucket_choose(in, x, r);
357 BUG_ON(item >= map->max_devices);
358
359 /* desired type? */
360 if (item < 0)
361 itemtype = map->buckets[-1-item]->type;
362 else
363 itemtype = 0;
364 dprintk(" item %d type %d\n", item, itemtype);
365
366 /* keep going? */
367 if (itemtype != type) {
368 BUG_ON(item >= 0 ||
369 (-1-item) >= map->max_buckets);
370 in = map->buckets[-1-item];
371 retry_bucket = 1;
372 continue;
373 }
374
375 /* collision? */
376 for (i = 0; i < outpos; i++) {
377 if (out[i] == item) {
378 collide = 1;
379 break;
380 }
381 }
382
383 reject = 0;
384 if (recurse_to_leaf) {
385 if (item < 0) {
386 if (crush_choose(map,
387 map->buckets[-1-item],
388 weight,
389 x, outpos+1, 0,
390 out2, outpos,
391 firstn, 0,
392 NULL) <= outpos)
393 /* didn't get leaf */
394 reject = 1;
395 } else {
396 /* we already have a leaf! */
397 out2[outpos] = item;
398 }
399 }
400
401 if (!reject) {
402 /* out? */
403 if (itemtype == 0)
404 reject = is_out(map, weight,
405 item, x);
406 else
407 reject = 0;
408 }
409
410reject:
411 if (reject || collide) {
412 ftotal++;
413 flocal++;
414
415 if (collide && flocal < 3)
416 /* retry locally a few times */
417 retry_bucket = 1;
418 else if (flocal < in->size + orig_tries)
419 /* exhaustive bucket search */
420 retry_bucket = 1;
421 else if (ftotal < 20)
422 /* then retry descent */
423 retry_descent = 1;
424 else
425 /* else give up */
426 skip_rep = 1;
427 dprintk(" reject %d collide %d "
428 "ftotal %d flocal %d\n",
429 reject, collide, ftotal,
430 flocal);
431 }
432 } while (retry_bucket);
433 } while (retry_descent);
434
435 if (skip_rep) {
436 dprintk("skip rep\n");
437 continue;
438 }
439
440 dprintk("CHOOSE got %d\n", item);
441 out[outpos] = item;
442 outpos++;
443 }
444
445 dprintk("CHOOSE returns %d\n", outpos);
446 return outpos;
447}
448
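The retry budget works out as follows for, say, an 8-item bucket: a collision is retried locally while flocal < 3; local attempts continue up to in->size + orig_tries = 13 before the bucket is abandoned; the choice switches from crush_bucket_choose to the exhaustive bucket_perm_choose once flocal reaches both size/2 and orig_tries + 1 (here, 6); and after 20 total failures (ftotal) the replica slot is skipped entirely.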
449
450/**
451 * crush_do_rule - calculate a mapping with the given input and rule
452 * @map: the crush_map
453 * @ruleno: the rule id
454 * @x: hash input
455 * @result: pointer to result vector
456 * @result_max: maximum result size
457 * @force: force initial replica choice; -1 for none
458 */
459int crush_do_rule(struct crush_map *map,
460 int ruleno, int x, int *result, int result_max,
461 int force, __u32 *weight)
462{
463 int result_len;
464 int force_context[CRUSH_MAX_DEPTH];
465 int force_pos = -1;
466 int a[CRUSH_MAX_SET];
467 int b[CRUSH_MAX_SET];
468 int c[CRUSH_MAX_SET];
469 int recurse_to_leaf;
470 int *w;
471 int wsize = 0;
472 int *o;
473 int osize;
474 int *tmp;
475 struct crush_rule *rule;
476 int step;
477 int i, j;
478 int numrep;
479 int firstn;
480 int rc = -1;
481
482 BUG_ON(ruleno >= map->max_rules);
483
484 rule = map->rules[ruleno];
485 result_len = 0;
486 w = a;
487 o = b;
488
489 /*
490 * determine hierarchical context of force, if any. note
491 * that this may or may not correspond to the specific types
492 * referenced by the crush rule.
493 */
494 if (force >= 0) {
495 if (force >= map->max_devices ||
496 map->device_parents[force] == 0) {
497 /*dprintk("CRUSH: forcefed device dne\n");*/
498 rc = -1; /* force fed device dne */
499 goto out;
500 }
501 if (!is_out(map, weight, force, x)) {
502 while (1) {
503 force_context[++force_pos] = force;
504 if (force >= 0)
505 force = map->device_parents[force];
506 else
507 force = map->bucket_parents[-1-force];
508 if (force == 0)
509 break;
510 }
511 }
512 }
513
514 for (step = 0; step < rule->len; step++) {
515 firstn = 0;
516 switch (rule->steps[step].op) {
517 case CRUSH_RULE_TAKE:
518 w[0] = rule->steps[step].arg1;
519 if (force_pos >= 0) {
520 BUG_ON(force_context[force_pos] != w[0]);
521 force_pos--;
522 }
523 wsize = 1;
524 break;
525
526 case CRUSH_RULE_CHOOSE_LEAF_FIRSTN:
527 case CRUSH_RULE_CHOOSE_FIRSTN:
528 firstn = 1;
529 case CRUSH_RULE_CHOOSE_LEAF_INDEP:
530 case CRUSH_RULE_CHOOSE_INDEP:
531 BUG_ON(wsize == 0);
532
533 recurse_to_leaf =
534 rule->steps[step].op ==
535 CRUSH_RULE_CHOOSE_LEAF_FIRSTN ||
536 rule->steps[step].op ==
537 CRUSH_RULE_CHOOSE_LEAF_INDEP;
538
539 /* reset output */
540 osize = 0;
541
542 for (i = 0; i < wsize; i++) {
543 /*
544 * see CRUSH_N, CRUSH_N_MINUS macros.
545 * basically, numrep <= 0 means relative to
546 * the provided result_max
547 */
548 numrep = rule->steps[step].arg1;
549 if (numrep <= 0) {
550 numrep += result_max;
551 if (numrep <= 0)
552 continue;
553 }
554 j = 0;
555 if (osize == 0 && force_pos >= 0) {
556 /* skip any intermediate types */
557 while (force_pos &&
558 force_context[force_pos] < 0 &&
559 rule->steps[step].arg2 !=
560 map->buckets[-1 -
561 force_context[force_pos]]->type)
562 force_pos--;
563 o[osize] = force_context[force_pos];
564 if (recurse_to_leaf)
565 c[osize] = force_context[0];
566 j++;
567 force_pos--;
568 }
569 osize += crush_choose(map,
570 map->buckets[-1-w[i]],
571 weight,
572 x, numrep,
573 rule->steps[step].arg2,
574 o+osize, j,
575 firstn,
576 recurse_to_leaf, c+osize);
577 }
578
579 if (recurse_to_leaf)
580 /* copy final _leaf_ values to output set */
581 memcpy(o, c, osize*sizeof(*o));
582
583 /* swap t and w arrays */
584 tmp = o;
585 o = w;
586 w = tmp;
587 wsize = osize;
588 break;
589
590
591 case CRUSH_RULE_EMIT:
592 for (i = 0; i < wsize && result_len < result_max; i++) {
593 result[result_len] = w[i];
594 result_len++;
595 }
596 wsize = 0;
597 break;
598
599 default:
600 BUG_ON(1);
601 }
602 }
603 rc = result_len;
604
605out:
606 return rc;
607}
608
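Taken together, a caller maps an input onto an OSD set in two steps: find a rule matching the pool's ruleset, type, and replica count, then execute it. A hedged sketch, where pool_ruleset, pool_type, pg_hash, size, and osd_weights stand in for values a real caller pulls from the osdmap:

        int osds[CRUSH_MAX_SET];
        int ruleno, num;

        ruleno = crush_find_rule(map, pool_ruleset, pool_type, size);
        if (ruleno < 0)
                return -ENOENT;                 /* no matching rule */
        num = crush_do_rule(map, ruleno, pg_hash, osds, size,
                            -1 /* no forced device */, osd_weights);
        /* osds[0..num-1] now holds the placement for this input */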
609
diff --git a/fs/ceph/crush/mapper.h b/fs/ceph/crush/mapper.h
deleted file mode 100644
index c46b99c18bb0..000000000000
--- a/fs/ceph/crush/mapper.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef CEPH_CRUSH_MAPPER_H
2#define CEPH_CRUSH_MAPPER_H
3
4/*
5 * CRUSH functions for finding rules and then mapping an input to an
6 * output set.
7 *
8 * LGPL2
9 */
10
11#include "crush.h"
12
13extern int crush_find_rule(struct crush_map *map, int pool, int type, int size);
14extern int crush_do_rule(struct crush_map *map,
15 int ruleno,
16 int x, int *result, int result_max,
17 int forcefeed, /* -1 for none */
18 __u32 *weights);
19
20#endif
diff --git a/fs/ceph/crypto.c b/fs/ceph/crypto.c
deleted file mode 100644
index a3e627f63293..000000000000
--- a/fs/ceph/crypto.c
+++ /dev/null
@@ -1,412 +0,0 @@
1
2#include "ceph_debug.h"
3
4#include <linux/err.h>
5#include <linux/scatterlist.h>
6#include <linux/slab.h>
7#include <crypto/hash.h>
8
9#include "crypto.h"
10#include "decode.h"
11
12int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
13{
14 if (*p + sizeof(u16) + sizeof(key->created) +
15 sizeof(u16) + key->len > end)
16 return -ERANGE;
17 ceph_encode_16(p, key->type);
18 ceph_encode_copy(p, &key->created, sizeof(key->created));
19 ceph_encode_16(p, key->len);
20 ceph_encode_copy(p, key->key, key->len);
21 return 0;
22}
23
24int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
25{
26 ceph_decode_need(p, end, 2*sizeof(u16) + sizeof(key->created), bad);
27 key->type = ceph_decode_16(p);
28 ceph_decode_copy(p, &key->created, sizeof(key->created));
29 key->len = ceph_decode_16(p);
30 ceph_decode_need(p, end, key->len, bad);
31 key->key = kmalloc(key->len, GFP_NOFS);
32 if (!key->key)
33 return -ENOMEM;
34 ceph_decode_copy(p, key->key, key->len);
35 return 0;
36
37bad:
38 dout("failed to decode crypto key\n");
39 return -EINVAL;
40}
41
42int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *inkey)
43{
44 int inlen = strlen(inkey);
45 int blen = inlen * 3 / 4;
46 void *buf, *p;
47 int ret;
48
49 dout("crypto_key_unarmor %s\n", inkey);
50 buf = kmalloc(blen, GFP_NOFS);
51 if (!buf)
52 return -ENOMEM;
53 blen = ceph_unarmor(buf, inkey, inkey+inlen);
54 if (blen < 0) {
55 kfree(buf);
56 return blen;
57 }
58
59 p = buf;
60 ret = ceph_crypto_key_decode(key, &p, p + blen);
61 kfree(buf);
62 if (ret)
63 return ret;
64 dout("crypto_key_unarmor key %p type %d len %d\n", key,
65 key->type, key->len);
66 return 0;
67}
68
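ceph_unarmor() in armor.c is a base64 decoder: four armor characters decode to at most three bytes, so the inlen * 3 / 4 allocation above is a safe upper bound, with the exact decoded length returned in blen.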
69
70
71#define AES_KEY_SIZE 16
72
73static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
74{
75 return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
76}
77
78static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
79
80static int ceph_aes_encrypt(const void *key, int key_len,
81 void *dst, size_t *dst_len,
82 const void *src, size_t src_len)
83{
84 struct scatterlist sg_in[2], sg_out[1];
85 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
86 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
87 int ret;
88 void *iv;
89 int ivsize;
90 size_t zero_padding = (0x10 - (src_len & 0x0f));
91 char pad[16];
92
93 if (IS_ERR(tfm))
94 return PTR_ERR(tfm);
95
96 memset(pad, zero_padding, zero_padding);
97
98 *dst_len = src_len + zero_padding;
99
100 crypto_blkcipher_setkey((void *)tfm, key, key_len);
101 sg_init_table(sg_in, 2);
102 sg_set_buf(&sg_in[0], src, src_len);
103 sg_set_buf(&sg_in[1], pad, zero_padding);
104 sg_init_table(sg_out, 1);
105 sg_set_buf(sg_out, dst, *dst_len);
106 iv = crypto_blkcipher_crt(tfm)->iv;
107 ivsize = crypto_blkcipher_ivsize(tfm);
108
109 memcpy(iv, aes_iv, ivsize);
110 /*
111 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
112 key, key_len, 1);
113 print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
114 src, src_len, 1);
115 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
116 pad, zero_padding, 1);
117 */
118 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
119 src_len + zero_padding);
120 crypto_free_blkcipher(tfm);
121 if (ret < 0)
122 		pr_err("ceph_aes_encrypt failed %d\n", ret);
123 /*
124 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
125 dst, *dst_len, 1);
126 */
127 	return ret;
128}
129
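The padding follows the PKCS#7 convention: every pad byte holds the pad length, and a full 16-byte block of padding is appended even when the input is already block aligned, since 0x10 - 0 = 0x10. Two worked cases:

        src_len = 29:  29 & 0x0f = 13, zero_padding = 0x10 - 13 = 3,
                       pad bytes are 0x03 0x03 0x03, *dst_len = 32
        src_len = 32:  32 & 0x0f = 0,  zero_padding = 0x10 (a full block),
                       *dst_len = 48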
130static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
131 size_t *dst_len,
132 const void *src1, size_t src1_len,
133 const void *src2, size_t src2_len)
134{
135 struct scatterlist sg_in[3], sg_out[1];
136 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
137 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
138 int ret;
139 void *iv;
140 int ivsize;
141 size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
142 char pad[16];
143
144 if (IS_ERR(tfm))
145 return PTR_ERR(tfm);
146
147 memset(pad, zero_padding, zero_padding);
148
149 *dst_len = src1_len + src2_len + zero_padding;
150
151 crypto_blkcipher_setkey((void *)tfm, key, key_len);
152 sg_init_table(sg_in, 3);
153 sg_set_buf(&sg_in[0], src1, src1_len);
154 sg_set_buf(&sg_in[1], src2, src2_len);
155 sg_set_buf(&sg_in[2], pad, zero_padding);
156 sg_init_table(sg_out, 1);
157 sg_set_buf(sg_out, dst, *dst_len);
158 iv = crypto_blkcipher_crt(tfm)->iv;
159 ivsize = crypto_blkcipher_ivsize(tfm);
160
161 memcpy(iv, aes_iv, ivsize);
162 /*
163 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
164 key, key_len, 1);
165 print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
166 src1, src1_len, 1);
167 print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
168 src2, src2_len, 1);
169 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
170 pad, zero_padding, 1);
171 */
172 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
173 src1_len + src2_len + zero_padding);
174 crypto_free_blkcipher(tfm);
175 if (ret < 0)
176 		pr_err("ceph_aes_encrypt2 failed %d\n", ret);
177 /*
178 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
179 dst, *dst_len, 1);
180 */
181 	return ret;
182}
183
184static int ceph_aes_decrypt(const void *key, int key_len,
185 void *dst, size_t *dst_len,
186 const void *src, size_t src_len)
187{
188 struct scatterlist sg_in[1], sg_out[2];
189 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
190 struct blkcipher_desc desc = { .tfm = tfm };
191 char pad[16];
192 void *iv;
193 int ivsize;
194 int ret;
195 int last_byte;
196
197 if (IS_ERR(tfm))
198 return PTR_ERR(tfm);
199
200 crypto_blkcipher_setkey((void *)tfm, key, key_len);
201 sg_init_table(sg_in, 1);
202 sg_init_table(sg_out, 2);
203 sg_set_buf(sg_in, src, src_len);
204 sg_set_buf(&sg_out[0], dst, *dst_len);
205 sg_set_buf(&sg_out[1], pad, sizeof(pad));
206
207 iv = crypto_blkcipher_crt(tfm)->iv;
208 ivsize = crypto_blkcipher_ivsize(tfm);
209
210 memcpy(iv, aes_iv, ivsize);
211
212 /*
213 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
214 key, key_len, 1);
215 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
216 src, src_len, 1);
217 */
218
219 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
220 crypto_free_blkcipher(tfm);
221 if (ret < 0) {
222 pr_err("ceph_aes_decrypt failed %d\n", ret);
223 return ret;
224 }
225
226 if (src_len <= *dst_len)
227 last_byte = ((char *)dst)[src_len - 1];
228 else
229 last_byte = pad[src_len - *dst_len - 1];
230 	if (last_byte > 0 && last_byte <= 16 && src_len >= last_byte) {
231 *dst_len = src_len - last_byte;
232 } else {
233 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
234 last_byte, (int)src_len);
235 return -EPERM; /* bad padding */
236 }
237 /*
238 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
239 dst, *dst_len, 1);
240 */
241 return 0;
242}
243
244static int ceph_aes_decrypt2(const void *key, int key_len,
245 void *dst1, size_t *dst1_len,
246 void *dst2, size_t *dst2_len,
247 const void *src, size_t src_len)
248{
249 struct scatterlist sg_in[1], sg_out[3];
250 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
251 struct blkcipher_desc desc = { .tfm = tfm };
252 char pad[16];
253 void *iv;
254 int ivsize;
255 int ret;
256 int last_byte;
257
258 if (IS_ERR(tfm))
259 return PTR_ERR(tfm);
260
261 sg_init_table(sg_in, 1);
262 sg_set_buf(sg_in, src, src_len);
263 sg_init_table(sg_out, 3);
264 sg_set_buf(&sg_out[0], dst1, *dst1_len);
265 sg_set_buf(&sg_out[1], dst2, *dst2_len);
266 sg_set_buf(&sg_out[2], pad, sizeof(pad));
267
268 crypto_blkcipher_setkey((void *)tfm, key, key_len);
269 iv = crypto_blkcipher_crt(tfm)->iv;
270 ivsize = crypto_blkcipher_ivsize(tfm);
271
272 memcpy(iv, aes_iv, ivsize);
273
274 /*
275 print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
276 key, key_len, 1);
277 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
278 src, src_len, 1);
279 */
280
281 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
282 crypto_free_blkcipher(tfm);
283 if (ret < 0) {
284 		pr_err("ceph_aes_decrypt2 failed %d\n", ret);
285 return ret;
286 }
287
288 if (src_len <= *dst1_len)
289 last_byte = ((char *)dst1)[src_len - 1];
290 else if (src_len <= *dst1_len + *dst2_len)
291 last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
292 else
293 last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
294 	if (last_byte > 0 && last_byte <= 16 && src_len >= last_byte) {
295 src_len -= last_byte;
296 } else {
297 pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
298 last_byte, (int)src_len);
299 return -EPERM; /* bad padding */
300 }
301
302 if (src_len < *dst1_len) {
303 *dst1_len = src_len;
304 *dst2_len = 0;
305 } else {
306 *dst2_len = src_len - *dst1_len;
307 }
308 /*
309 print_hex_dump(KERN_ERR, "dec out1: ", DUMP_PREFIX_NONE, 16, 1,
310 dst1, *dst1_len, 1);
311 print_hex_dump(KERN_ERR, "dec out2: ", DUMP_PREFIX_NONE, 16, 1,
312 dst2, *dst2_len, 1);
313 */
314
315 return 0;
316}
317
318
319int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
320 const void *src, size_t src_len)
321{
322 switch (secret->type) {
323 case CEPH_CRYPTO_NONE:
324 if (*dst_len < src_len)
325 return -ERANGE;
326 memcpy(dst, src, src_len);
327 *dst_len = src_len;
328 return 0;
329
330 case CEPH_CRYPTO_AES:
331 return ceph_aes_decrypt(secret->key, secret->len, dst,
332 dst_len, src, src_len);
333
334 default:
335 return -EINVAL;
336 }
337}
338
339int ceph_decrypt2(struct ceph_crypto_key *secret,
340 void *dst1, size_t *dst1_len,
341 void *dst2, size_t *dst2_len,
342 const void *src, size_t src_len)
343{
344 size_t t;
345
346 switch (secret->type) {
347 case CEPH_CRYPTO_NONE:
348 if (*dst1_len + *dst2_len < src_len)
349 return -ERANGE;
350 t = min(*dst1_len, src_len);
351 memcpy(dst1, src, t);
352 *dst1_len = t;
353 src += t;
354 src_len -= t;
355 if (src_len) {
356 t = min(*dst2_len, src_len);
357 memcpy(dst2, src, t);
358 *dst2_len = t;
359 }
360 return 0;
361
362 case CEPH_CRYPTO_AES:
363 return ceph_aes_decrypt2(secret->key, secret->len,
364 dst1, dst1_len, dst2, dst2_len,
365 src, src_len);
366
367 default:
368 return -EINVAL;
369 }
370}
371
372int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
373 const void *src, size_t src_len)
374{
375 switch (secret->type) {
376 case CEPH_CRYPTO_NONE:
377 if (*dst_len < src_len)
378 return -ERANGE;
379 memcpy(dst, src, src_len);
380 *dst_len = src_len;
381 return 0;
382
383 case CEPH_CRYPTO_AES:
384 return ceph_aes_encrypt(secret->key, secret->len, dst,
385 dst_len, src, src_len);
386
387 default:
388 return -EINVAL;
389 }
390}
391
392int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
393 const void *src1, size_t src1_len,
394 const void *src2, size_t src2_len)
395{
396 switch (secret->type) {
397 case CEPH_CRYPTO_NONE:
398 if (*dst_len < src1_len + src2_len)
399 return -ERANGE;
400 memcpy(dst, src1, src1_len);
401 memcpy(dst + src1_len, src2, src2_len);
402 *dst_len = src1_len + src2_len;
403 return 0;
404
405 case CEPH_CRYPTO_AES:
406 return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
407 src1, src1_len, src2, src2_len);
408
409 default:
410 return -EINVAL;
411 }
412}
diff --git a/fs/ceph/crypto.h b/fs/ceph/crypto.h
deleted file mode 100644
index bdf38607323c..000000000000
--- a/fs/ceph/crypto.h
+++ /dev/null
@@ -1,48 +0,0 @@
1#ifndef _FS_CEPH_CRYPTO_H
2#define _FS_CEPH_CRYPTO_H
3
4#include "types.h"
5#include "buffer.h"
6
7/*
8 * cryptographic secret
9 */
10struct ceph_crypto_key {
11 int type;
12 struct ceph_timespec created;
13 int len;
14 void *key;
15};
16
17static inline void ceph_crypto_key_destroy(struct ceph_crypto_key *key)
18{
19 kfree(key->key);
20}
21
22extern int ceph_crypto_key_encode(struct ceph_crypto_key *key,
23 void **p, void *end);
24extern int ceph_crypto_key_decode(struct ceph_crypto_key *key,
25 void **p, void *end);
26extern int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
27
28/* crypto.c */
29extern int ceph_decrypt(struct ceph_crypto_key *secret,
30 void *dst, size_t *dst_len,
31 const void *src, size_t src_len);
32extern int ceph_encrypt(struct ceph_crypto_key *secret,
33 void *dst, size_t *dst_len,
34 const void *src, size_t src_len);
35extern int ceph_decrypt2(struct ceph_crypto_key *secret,
36 void *dst1, size_t *dst1_len,
37 void *dst2, size_t *dst2_len,
38 const void *src, size_t src_len);
39extern int ceph_encrypt2(struct ceph_crypto_key *secret,
40 void *dst, size_t *dst_len,
41 const void *src1, size_t src1_len,
42 const void *src2, size_t src2_len);
43
44/* armor.c */
45extern int ceph_armor(char *dst, const char *src, const char *end);
46extern int ceph_unarmor(char *dst, const char *src, const char *end);
47
48#endif
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 6fd8b20a8611..94fe1d284c3a 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/device.h> 3#include <linux/device.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
@@ -7,143 +7,48 @@
7#include <linux/debugfs.h> 7#include <linux/debugfs.h>
8#include <linux/seq_file.h> 8#include <linux/seq_file.h>
9 9
10#include "super.h" 10#include <linux/ceph/libceph.h>
11#include "mds_client.h" 11#include <linux/ceph/mon_client.h>
12#include "mon_client.h" 12#include <linux/ceph/auth.h>
13#include "auth.h" 13#include <linux/ceph/debugfs.h>
14 14
15#ifdef CONFIG_DEBUG_FS 15#ifdef CONFIG_DEBUG_FS
16 16
17/* 17#include "super.h"
18 * Implement /sys/kernel/debug/ceph fun 18#include "mds_client.h"
19 *
20 * /sys/kernel/debug/ceph/client* - an instance of the ceph client
21 * .../osdmap - current osdmap
22 * .../mdsmap - current mdsmap
23 * .../monmap - current monmap
24 * .../osdc - active osd requests
25 * .../mdsc - active mds requests
26 * .../monc - mon client state
27 * .../dentry_lru - dump contents of dentry lru
28 * .../caps - expose cap (reservation) stats
29 * .../bdi - symlink to ../../bdi/something
30 */
31
32static struct dentry *ceph_debugfs_dir;
33
34static int monmap_show(struct seq_file *s, void *p)
35{
36 int i;
37 struct ceph_client *client = s->private;
38
39 if (client->monc.monmap == NULL)
40 return 0;
41
42 seq_printf(s, "epoch %d\n", client->monc.monmap->epoch);
43 for (i = 0; i < client->monc.monmap->num_mon; i++) {
44 struct ceph_entity_inst *inst =
45 &client->monc.monmap->mon_inst[i];
46
47 seq_printf(s, "\t%s%lld\t%s\n",
48 ENTITY_NAME(inst->name),
49 pr_addr(&inst->addr.in_addr));
50 }
51 return 0;
52}
53 19
54static int mdsmap_show(struct seq_file *s, void *p) 20static int mdsmap_show(struct seq_file *s, void *p)
55{ 21{
56 int i; 22 int i;
57 struct ceph_client *client = s->private; 23 struct ceph_fs_client *fsc = s->private;
58 24
59 if (client->mdsc.mdsmap == NULL) 25 if (fsc->mdsc == NULL || fsc->mdsc->mdsmap == NULL)
60 return 0; 26 return 0;
61 seq_printf(s, "epoch %d\n", client->mdsc.mdsmap->m_epoch); 27 seq_printf(s, "epoch %d\n", fsc->mdsc->mdsmap->m_epoch);
62 seq_printf(s, "root %d\n", client->mdsc.mdsmap->m_root); 28 seq_printf(s, "root %d\n", fsc->mdsc->mdsmap->m_root);
63 seq_printf(s, "session_timeout %d\n", 29 seq_printf(s, "session_timeout %d\n",
64 client->mdsc.mdsmap->m_session_timeout); 30 fsc->mdsc->mdsmap->m_session_timeout);
65 seq_printf(s, "session_autoclose %d\n", 31 seq_printf(s, "session_autoclose %d\n",
66 client->mdsc.mdsmap->m_session_autoclose); 32 fsc->mdsc->mdsmap->m_session_autoclose);
67 for (i = 0; i < client->mdsc.mdsmap->m_max_mds; i++) { 33 for (i = 0; i < fsc->mdsc->mdsmap->m_max_mds; i++) {
68 struct ceph_entity_addr *addr = 34 struct ceph_entity_addr *addr =
69 &client->mdsc.mdsmap->m_info[i].addr; 35 &fsc->mdsc->mdsmap->m_info[i].addr;
70 int state = client->mdsc.mdsmap->m_info[i].state; 36 int state = fsc->mdsc->mdsmap->m_info[i].state;
71 37
72 seq_printf(s, "\tmds%d\t%s\t(%s)\n", i, pr_addr(&addr->in_addr), 38 seq_printf(s, "\tmds%d\t%s\t(%s)\n", i,
39 ceph_pr_addr(&addr->in_addr),
73 ceph_mds_state_name(state)); 40 ceph_mds_state_name(state));
74 } 41 }
75 return 0; 42 return 0;
76} 43}
77 44
78static int osdmap_show(struct seq_file *s, void *p) 45/*
79{ 46 * mdsc debugfs
80 int i; 47 */
81 struct ceph_client *client = s->private;
82 struct rb_node *n;
83
84 if (client->osdc.osdmap == NULL)
85 return 0;
86 seq_printf(s, "epoch %d\n", client->osdc.osdmap->epoch);
87 seq_printf(s, "flags%s%s\n",
88 (client->osdc.osdmap->flags & CEPH_OSDMAP_NEARFULL) ?
89 " NEARFULL" : "",
90 (client->osdc.osdmap->flags & CEPH_OSDMAP_FULL) ?
91 " FULL" : "");
92 for (n = rb_first(&client->osdc.osdmap->pg_pools); n; n = rb_next(n)) {
93 struct ceph_pg_pool_info *pool =
94 rb_entry(n, struct ceph_pg_pool_info, node);
95 seq_printf(s, "pg_pool %d pg_num %d / %d, lpg_num %d / %d\n",
96 pool->id, pool->v.pg_num, pool->pg_num_mask,
97 pool->v.lpg_num, pool->lpg_num_mask);
98 }
99 for (i = 0; i < client->osdc.osdmap->max_osd; i++) {
100 struct ceph_entity_addr *addr =
101 &client->osdc.osdmap->osd_addr[i];
102 int state = client->osdc.osdmap->osd_state[i];
103 char sb[64];
104
105 seq_printf(s, "\tosd%d\t%s\t%3d%%\t(%s)\n",
106 i, pr_addr(&addr->in_addr),
107 ((client->osdc.osdmap->osd_weight[i]*100) >> 16),
108 ceph_osdmap_state_str(sb, sizeof(sb), state));
109 }
110 return 0;
111}
112
113static int monc_show(struct seq_file *s, void *p)
114{
115 struct ceph_client *client = s->private;
116 struct ceph_mon_generic_request *req;
117 struct ceph_mon_client *monc = &client->monc;
118 struct rb_node *rp;
119
120 mutex_lock(&monc->mutex);
121
122 if (monc->have_mdsmap)
123 seq_printf(s, "have mdsmap %u\n", (unsigned)monc->have_mdsmap);
124 if (monc->have_osdmap)
125 seq_printf(s, "have osdmap %u\n", (unsigned)monc->have_osdmap);
126 if (monc->want_next_osdmap)
127 seq_printf(s, "want next osdmap\n");
128
129 for (rp = rb_first(&monc->generic_request_tree); rp; rp = rb_next(rp)) {
130 __u16 op;
131 req = rb_entry(rp, struct ceph_mon_generic_request, node);
132 op = le16_to_cpu(req->request->hdr.type);
133 if (op == CEPH_MSG_STATFS)
134 seq_printf(s, "%lld statfs\n", req->tid);
135 else
136 seq_printf(s, "%lld unknown\n", req->tid);
137 }
138
139 mutex_unlock(&monc->mutex);
140 return 0;
141}
142
143static int mdsc_show(struct seq_file *s, void *p) 48static int mdsc_show(struct seq_file *s, void *p)
144{ 49{
145 struct ceph_client *client = s->private; 50 struct ceph_fs_client *fsc = s->private;
146 struct ceph_mds_client *mdsc = &client->mdsc; 51 struct ceph_mds_client *mdsc = fsc->mdsc;
147 struct ceph_mds_request *req; 52 struct ceph_mds_request *req;
148 struct rb_node *rp; 53 struct rb_node *rp;
149 int pathlen; 54 int pathlen;
@@ -214,61 +119,12 @@ static int mdsc_show(struct seq_file *s, void *p)
214 return 0; 119 return 0;
215} 120}
216 121
217static int osdc_show(struct seq_file *s, void *pp)
218{
219 struct ceph_client *client = s->private;
220 struct ceph_osd_client *osdc = &client->osdc;
221 struct rb_node *p;
222
223 mutex_lock(&osdc->request_mutex);
224 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
225 struct ceph_osd_request *req;
226 struct ceph_osd_request_head *head;
227 struct ceph_osd_op *op;
228 int num_ops;
229 int opcode, olen;
230 int i;
231
232 req = rb_entry(p, struct ceph_osd_request, r_node);
233
234 seq_printf(s, "%lld\tosd%d\t%d.%x\t", req->r_tid,
235 req->r_osd ? req->r_osd->o_osd : -1,
236 le32_to_cpu(req->r_pgid.pool),
237 le16_to_cpu(req->r_pgid.ps));
238
239 head = req->r_request->front.iov_base;
240 op = (void *)(head + 1);
241
242 num_ops = le16_to_cpu(head->num_ops);
243 olen = le32_to_cpu(head->object_len);
244 seq_printf(s, "%.*s", olen,
245 (const char *)(head->ops + num_ops));
246
247 if (req->r_reassert_version.epoch)
248 seq_printf(s, "\t%u'%llu",
249 (unsigned)le32_to_cpu(req->r_reassert_version.epoch),
250 le64_to_cpu(req->r_reassert_version.version));
251 else
252 seq_printf(s, "\t");
253
254 for (i = 0; i < num_ops; i++) {
255 opcode = le16_to_cpu(op->op);
256 seq_printf(s, "\t%s", ceph_osd_op_name(opcode));
257 op++;
258 }
259
260 seq_printf(s, "\n");
261 }
262 mutex_unlock(&osdc->request_mutex);
263 return 0;
264}
265
266static int caps_show(struct seq_file *s, void *p) 122static int caps_show(struct seq_file *s, void *p)
267{ 123{
268 struct ceph_client *client = s->private; 124 struct ceph_fs_client *fsc = s->private;
269 int total, avail, used, reserved, min; 125 int total, avail, used, reserved, min;
270 126
271 ceph_reservation_status(client, &total, &avail, &used, &reserved, &min); 127 ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min);
272 seq_printf(s, "total\t\t%d\n" 128 seq_printf(s, "total\t\t%d\n"
273 "avail\t\t%d\n" 129 "avail\t\t%d\n"
274 "used\t\t%d\n" 130 "used\t\t%d\n"
@@ -280,8 +136,8 @@ static int caps_show(struct seq_file *s, void *p)
280 136
281static int dentry_lru_show(struct seq_file *s, void *ptr) 137static int dentry_lru_show(struct seq_file *s, void *ptr)
282{ 138{
283 struct ceph_client *client = s->private; 139 struct ceph_fs_client *fsc = s->private;
284 struct ceph_mds_client *mdsc = &client->mdsc; 140 struct ceph_mds_client *mdsc = fsc->mdsc;
285 struct ceph_dentry_info *di; 141 struct ceph_dentry_info *di;
286 142
287 spin_lock(&mdsc->dentry_lru_lock); 143 spin_lock(&mdsc->dentry_lru_lock);
@@ -295,199 +151,124 @@ static int dentry_lru_show(struct seq_file *s, void *ptr)
295 return 0; 151 return 0;
296} 152}
297 153
298#define DEFINE_SHOW_FUNC(name) \ 154CEPH_DEFINE_SHOW_FUNC(mdsmap_show)
299static int name##_open(struct inode *inode, struct file *file) \ 155CEPH_DEFINE_SHOW_FUNC(mdsc_show)
300{ \ 156CEPH_DEFINE_SHOW_FUNC(caps_show)
301 struct seq_file *sf; \ 157CEPH_DEFINE_SHOW_FUNC(dentry_lru_show)
302 int ret; \ 158
303 \
304 ret = single_open(file, name, NULL); \
305 sf = file->private_data; \
306 sf->private = inode->i_private; \
307 return ret; \
308} \
309 \
310static const struct file_operations name##_fops = { \
311 .open = name##_open, \
312 .read = seq_read, \
313 .llseek = seq_lseek, \
314 .release = single_release, \
315};
316
317DEFINE_SHOW_FUNC(monmap_show)
318DEFINE_SHOW_FUNC(mdsmap_show)
319DEFINE_SHOW_FUNC(osdmap_show)
320DEFINE_SHOW_FUNC(monc_show)
321DEFINE_SHOW_FUNC(mdsc_show)
322DEFINE_SHOW_FUNC(osdc_show)
323DEFINE_SHOW_FUNC(dentry_lru_show)
324DEFINE_SHOW_FUNC(caps_show)
325 159
160/*
161 * debugfs
162 */
326static int congestion_kb_set(void *data, u64 val) 163static int congestion_kb_set(void *data, u64 val)
327{ 164{
328 struct ceph_client *client = (struct ceph_client *)data; 165 struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
329
330 if (client)
331 client->mount_args->congestion_kb = (int)val;
332 166
167 fsc->mount_options->congestion_kb = (int)val;
333 return 0; 168 return 0;
334} 169}
335 170
336static int congestion_kb_get(void *data, u64 *val) 171static int congestion_kb_get(void *data, u64 *val)
337{ 172{
338 struct ceph_client *client = (struct ceph_client *)data; 173 struct ceph_fs_client *fsc = (struct ceph_fs_client *)data;
339
340 if (client)
341 *val = (u64)client->mount_args->congestion_kb;
342 174
175 *val = (u64)fsc->mount_options->congestion_kb;
343 return 0; 176 return 0;
344} 177}
345 178
346
347DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get, 179DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
348 congestion_kb_set, "%llu\n"); 180 congestion_kb_set, "%llu\n");
349 181
350int __init ceph_debugfs_init(void)
351{
352 ceph_debugfs_dir = debugfs_create_dir("ceph", NULL);
353 if (!ceph_debugfs_dir)
354 return -ENOMEM;
355 return 0;
356}
357 182
358void ceph_debugfs_cleanup(void) 183void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
359{ 184{
360 debugfs_remove(ceph_debugfs_dir); 185 dout("ceph_fs_debugfs_cleanup\n");
186 debugfs_remove(fsc->debugfs_bdi);
187 debugfs_remove(fsc->debugfs_congestion_kb);
188 debugfs_remove(fsc->debugfs_mdsmap);
189 debugfs_remove(fsc->debugfs_caps);
190 debugfs_remove(fsc->debugfs_mdsc);
191 debugfs_remove(fsc->debugfs_dentry_lru);
361} 192}
362 193
363int ceph_debugfs_client_init(struct ceph_client *client) 194int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
364{ 195{
365 int ret = 0; 196 char name[100];
366 char name[80]; 197 int err = -ENOMEM;
367
368 snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid,
369 client->monc.auth->global_id);
370
371 client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir);
372 if (!client->debugfs_dir)
373 goto out;
374 198
375 client->monc.debugfs_file = debugfs_create_file("monc", 199 dout("ceph_fs_debugfs_init\n");
376 0600, 200 fsc->debugfs_congestion_kb =
377 client->debugfs_dir, 201 debugfs_create_file("writeback_congestion_kb",
378 client, 202 0600,
379 &monc_show_fops); 203 fsc->client->debugfs_dir,
380 if (!client->monc.debugfs_file) 204 fsc,
381 goto out; 205 &congestion_kb_fops);
382 206 if (!fsc->debugfs_congestion_kb)
383 client->mdsc.debugfs_file = debugfs_create_file("mdsc",
384 0600,
385 client->debugfs_dir,
386 client,
387 &mdsc_show_fops);
388 if (!client->mdsc.debugfs_file)
389 goto out; 207 goto out;
390 208
391 client->osdc.debugfs_file = debugfs_create_file("osdc", 209 dout("a\n");
392 0600,
393 client->debugfs_dir,
394 client,
395 &osdc_show_fops);
396 if (!client->osdc.debugfs_file)
397 goto out;
398 210
399 client->debugfs_monmap = debugfs_create_file("monmap", 211 snprintf(name, sizeof(name), "../../bdi/%s",
400 0600, 212 dev_name(fsc->backing_dev_info.dev));
401 client->debugfs_dir, 213 fsc->debugfs_bdi =
402 client, 214 debugfs_create_symlink("bdi",
403 &monmap_show_fops); 215 fsc->client->debugfs_dir,
404 if (!client->debugfs_monmap) 216 name);
217 if (!fsc->debugfs_bdi)
405 goto out; 218 goto out;
406 219
407 client->debugfs_mdsmap = debugfs_create_file("mdsmap", 220 dout("b\n");
221 fsc->debugfs_mdsmap = debugfs_create_file("mdsmap",
408 0600, 222 0600,
409 client->debugfs_dir, 223 fsc->client->debugfs_dir,
410 client, 224 fsc,
411 &mdsmap_show_fops); 225 &mdsmap_show_fops);
412 if (!client->debugfs_mdsmap) 226 if (!fsc->debugfs_mdsmap)
413 goto out; 227 goto out;
414 228
415 client->debugfs_osdmap = debugfs_create_file("osdmap", 229 dout("ca\n");
416 0600, 230 fsc->debugfs_mdsc = debugfs_create_file("mdsc",
417 client->debugfs_dir, 231 0600,
418 client, 232 fsc->client->debugfs_dir,
419 &osdmap_show_fops); 233 fsc,
420 if (!client->debugfs_osdmap) 234 &mdsc_show_fops);
235 if (!fsc->debugfs_mdsc)
421 goto out; 236 goto out;
422 237
423 client->debugfs_dentry_lru = debugfs_create_file("dentry_lru", 238 dout("da\n");
424 0600, 239 fsc->debugfs_caps = debugfs_create_file("caps",
425 client->debugfs_dir,
426 client,
427 &dentry_lru_show_fops);
428 if (!client->debugfs_dentry_lru)
429 goto out;
430
431 client->debugfs_caps = debugfs_create_file("caps",
432 0400, 240 0400,
433 client->debugfs_dir, 241 fsc->client->debugfs_dir,
434 client, 242 fsc,
435 &caps_show_fops); 243 &caps_show_fops);
436 if (!client->debugfs_caps) 244 if (!fsc->debugfs_caps)
437 goto out; 245 goto out;
438 246
439 client->debugfs_congestion_kb = 247 dout("ea\n");
440 debugfs_create_file("writeback_congestion_kb", 248 fsc->debugfs_dentry_lru = debugfs_create_file("dentry_lru",
441 0600, 249 0600,
442 client->debugfs_dir, 250 fsc->client->debugfs_dir,
443 client, 251 fsc,
444 &congestion_kb_fops); 252 &dentry_lru_show_fops);
445 if (!client->debugfs_congestion_kb) 253 if (!fsc->debugfs_dentry_lru)
446 goto out; 254 goto out;
447 255
448 sprintf(name, "../../bdi/%s", dev_name(client->sb->s_bdi->dev));
449 client->debugfs_bdi = debugfs_create_symlink("bdi", client->debugfs_dir,
450 name);
451
452 return 0; 256 return 0;
453 257
454out: 258out:
455 ceph_debugfs_client_cleanup(client); 259 ceph_fs_debugfs_cleanup(fsc);
456 return ret; 260 return err;
457} 261}
458 262
459void ceph_debugfs_client_cleanup(struct ceph_client *client)
460{
461 debugfs_remove(client->debugfs_bdi);
462 debugfs_remove(client->debugfs_caps);
463 debugfs_remove(client->debugfs_dentry_lru);
464 debugfs_remove(client->debugfs_osdmap);
465 debugfs_remove(client->debugfs_mdsmap);
466 debugfs_remove(client->debugfs_monmap);
467 debugfs_remove(client->osdc.debugfs_file);
468 debugfs_remove(client->mdsc.debugfs_file);
469 debugfs_remove(client->monc.debugfs_file);
470 debugfs_remove(client->debugfs_congestion_kb);
471 debugfs_remove(client->debugfs_dir);
472}
473 263
474#else /* CONFIG_DEBUG_FS */ 264#else /* CONFIG_DEBUG_FS */
475 265
476int __init ceph_debugfs_init(void) 266int ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
477{
478 return 0;
479}
480
481void ceph_debugfs_cleanup(void)
482{
483}
484
485int ceph_debugfs_client_init(struct ceph_client *client)
486{ 267{
487 return 0; 268 return 0;
488} 269}
489 270
490void ceph_debugfs_client_cleanup(struct ceph_client *client) 271void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
491{ 272{
492} 273}
493 274
diff --git a/fs/ceph/decode.h b/fs/ceph/decode.h
deleted file mode 100644
index c5b6939fb32a..000000000000
--- a/fs/ceph/decode.h
+++ /dev/null
@@ -1,201 +0,0 @@
1#ifndef __CEPH_DECODE_H
2#define __CEPH_DECODE_H
3
4#include <asm/unaligned.h>
5#include <linux/time.h>
6
7#include "types.h"
8
9/*
10 * in all cases,
11 * void **p pointer to position pointer
12 * void *end pointer to end of buffer (last byte + 1)
13 */
14
15static inline u64 ceph_decode_64(void **p)
16{
17 u64 v = get_unaligned_le64(*p);
18 *p += sizeof(u64);
19 return v;
20}
21static inline u32 ceph_decode_32(void **p)
22{
23 u32 v = get_unaligned_le32(*p);
24 *p += sizeof(u32);
25 return v;
26}
27static inline u16 ceph_decode_16(void **p)
28{
29 u16 v = get_unaligned_le16(*p);
30 *p += sizeof(u16);
31 return v;
32}
33static inline u8 ceph_decode_8(void **p)
34{
35 u8 v = *(u8 *)*p;
36 (*p)++;
37 return v;
38}
39static inline void ceph_decode_copy(void **p, void *pv, size_t n)
40{
41 memcpy(pv, *p, n);
42 *p += n;
43}
44
45/*
46 * bounds check input.
47 */
48#define ceph_decode_need(p, end, n, bad) \
49 do { \
50 if (unlikely(*(p) + (n) > (end))) \
51 goto bad; \
52 } while (0)
53
54#define ceph_decode_64_safe(p, end, v, bad) \
55 do { \
56 ceph_decode_need(p, end, sizeof(u64), bad); \
57 v = ceph_decode_64(p); \
58 } while (0)
59#define ceph_decode_32_safe(p, end, v, bad) \
60 do { \
61 ceph_decode_need(p, end, sizeof(u32), bad); \
62 v = ceph_decode_32(p); \
63 } while (0)
64#define ceph_decode_16_safe(p, end, v, bad) \
65 do { \
66 ceph_decode_need(p, end, sizeof(u16), bad); \
67 v = ceph_decode_16(p); \
68 } while (0)
69#define ceph_decode_8_safe(p, end, v, bad) \
70 do { \
71 ceph_decode_need(p, end, sizeof(u8), bad); \
72 v = ceph_decode_8(p); \
73 } while (0)
74
75#define ceph_decode_copy_safe(p, end, pv, n, bad) \
76 do { \
77 ceph_decode_need(p, end, n, bad); \
78 ceph_decode_copy(p, pv, n); \
79 } while (0)
80
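A typical consumer walks *p through a little-endian wire buffer with these helpers, bounds checking before each read. A hypothetical decoder for a (u32 epoch, u64 tid) pair, just to show the pattern:

        static int decode_pair(void **p, void *end, u32 *epoch, u64 *tid)
        {
                ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
                *epoch = ceph_decode_32(p);     /* advances *p by 4 */
                *tid = ceph_decode_64(p);       /* advances *p by 8 */
                return 0;
        bad:
                return -ERANGE;                 /* buffer too short */
        }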
81/*
82 * struct ceph_timespec <-> struct timespec
83 */
84static inline void ceph_decode_timespec(struct timespec *ts,
85 const struct ceph_timespec *tv)
86{
87 ts->tv_sec = le32_to_cpu(tv->tv_sec);
88 ts->tv_nsec = le32_to_cpu(tv->tv_nsec);
89}
90static inline void ceph_encode_timespec(struct ceph_timespec *tv,
91 const struct timespec *ts)
92{
93 tv->tv_sec = cpu_to_le32(ts->tv_sec);
94 tv->tv_nsec = cpu_to_le32(ts->tv_nsec);
95}
96
97/*
98 * sockaddr_storage <-> ceph_sockaddr
99 */
100static inline void ceph_encode_addr(struct ceph_entity_addr *a)
101{
102 __be16 ss_family = htons(a->in_addr.ss_family);
103 a->in_addr.ss_family = *(__u16 *)&ss_family;
104}
105static inline void ceph_decode_addr(struct ceph_entity_addr *a)
106{
107 __be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
108 a->in_addr.ss_family = ntohs(ss_family);
109 WARN_ON(a->in_addr.ss_family == 512);
110}
111
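The WARN_ON above is a double-swap sanity check: AF_INET is 2, and byte-swapping 2 gives 0x0200 = 512, so a decoded family of 512 on a little-endian host almost certainly means the address was already in host byte order.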
112/*
113 * encoders
114 */
115static inline void ceph_encode_64(void **p, u64 v)
116{
117 put_unaligned_le64(v, (__le64 *)*p);
118 *p += sizeof(u64);
119}
120static inline void ceph_encode_32(void **p, u32 v)
121{
122 put_unaligned_le32(v, (__le32 *)*p);
123 *p += sizeof(u32);
124}
125static inline void ceph_encode_16(void **p, u16 v)
126{
127 put_unaligned_le16(v, (__le16 *)*p);
128 *p += sizeof(u16);
129}
130static inline void ceph_encode_8(void **p, u8 v)
131{
132 *(u8 *)*p = v;
133 (*p)++;
134}
135static inline void ceph_encode_copy(void **p, const void *s, int len)
136{
137 memcpy(*p, s, len);
138 *p += len;
139}
140
141/*
142 * filepath, string encoders
143 */
144static inline void ceph_encode_filepath(void **p, void *end,
145 u64 ino, const char *path)
146{
147 u32 len = path ? strlen(path) : 0;
148 BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end);
149 ceph_encode_8(p, 1);
150 ceph_encode_64(p, ino);
151 ceph_encode_32(p, len);
152 if (len)
153 memcpy(*p, path, len);
154 *p += len;
155}
156
157static inline void ceph_encode_string(void **p, void *end,
158 const char *s, u32 len)
159{
160 BUG_ON(*p + sizeof(len) + len > end);
161 ceph_encode_32(p, len);
162 if (len)
163 memcpy(*p, s, len);
164 *p += len;
165}
166
167#define ceph_encode_need(p, end, n, bad) \
168 do { \
169 if (unlikely(*(p) + (n) > (end))) \
170 goto bad; \
171 } while (0)
172
173#define ceph_encode_64_safe(p, end, v, bad) \
174 do { \
175 ceph_encode_need(p, end, sizeof(u64), bad); \
176 ceph_encode_64(p, v); \
177 } while (0)
178#define ceph_encode_32_safe(p, end, v, bad) \
179 do { \
180 ceph_encode_need(p, end, sizeof(u32), bad); \
181 ceph_encode_32(p, v); \
182 } while (0)
183#define ceph_encode_16_safe(p, end, v, bad) \
184 do { \
185 ceph_encode_need(p, end, sizeof(u16), bad); \
186 ceph_encode_16(p, v); \
187 } while (0)
188
189#define ceph_encode_copy_safe(p, end, pv, n, bad) \
190 do { \
191 ceph_encode_need(p, end, n, bad); \
192 ceph_encode_copy(p, pv, n); \
193 } while (0)
194#define ceph_encode_string_safe(p, end, s, n, bad) \
195 do { \
196 ceph_encode_need(p, end, n, bad); \
197 ceph_encode_string(p, end, s, n); \
198 } while (0)
199
200
201#endif
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index a1986eb52045..a8d2aacc612b 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/spinlock.h> 3#include <linux/spinlock.h>
4#include <linux/fs_struct.h> 4#include <linux/fs_struct.h>
@@ -7,6 +7,7 @@
7#include <linux/sched.h> 7#include <linux/sched.h>
8 8
9#include "super.h" 9#include "super.h"
10#include "mds_client.h"
10 11
11/* 12/*
12 * Directory operations: readdir, lookup, create, link, unlink, 13 * Directory operations: readdir, lookup, create, link, unlink,
@@ -227,15 +228,15 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
227 struct ceph_file_info *fi = filp->private_data; 228 struct ceph_file_info *fi = filp->private_data;
228 struct inode *inode = filp->f_dentry->d_inode; 229 struct inode *inode = filp->f_dentry->d_inode;
229 struct ceph_inode_info *ci = ceph_inode(inode); 230 struct ceph_inode_info *ci = ceph_inode(inode);
230 struct ceph_client *client = ceph_inode_to_client(inode); 231 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
231 struct ceph_mds_client *mdsc = &client->mdsc; 232 struct ceph_mds_client *mdsc = fsc->mdsc;
232 unsigned frag = fpos_frag(filp->f_pos); 233 unsigned frag = fpos_frag(filp->f_pos);
233 int off = fpos_off(filp->f_pos); 234 int off = fpos_off(filp->f_pos);
234 int err; 235 int err;
235 u32 ftype; 236 u32 ftype;
236 struct ceph_mds_reply_info_parsed *rinfo; 237 struct ceph_mds_reply_info_parsed *rinfo;
237 const int max_entries = client->mount_args->max_readdir; 238 const int max_entries = fsc->mount_options->max_readdir;
238 const int max_bytes = client->mount_args->max_readdir_bytes; 239 const int max_bytes = fsc->mount_options->max_readdir_bytes;
239 240
240 dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off); 241 dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
241 if (fi->at_end) 242 if (fi->at_end)
@@ -267,7 +268,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
267 /* can we use the dcache? */ 268 /* can we use the dcache? */
268 spin_lock(&inode->i_lock); 269 spin_lock(&inode->i_lock);
269 if ((filp->f_pos == 2 || fi->dentry) && 270 if ((filp->f_pos == 2 || fi->dentry) &&
270 !ceph_test_opt(client, NOASYNCREADDIR) && 271 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
271 ceph_snap(inode) != CEPH_SNAPDIR && 272 ceph_snap(inode) != CEPH_SNAPDIR &&
272 (ci->i_ceph_flags & CEPH_I_COMPLETE) && 273 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
273 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { 274 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
@@ -487,14 +488,14 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
487struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, 488struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
488 struct dentry *dentry, int err) 489 struct dentry *dentry, int err)
489{ 490{
490 struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); 491 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
491 struct inode *parent = dentry->d_parent->d_inode; 492 struct inode *parent = dentry->d_parent->d_inode;
492 493
493 /* .snap dir? */ 494 /* .snap dir? */
494 if (err == -ENOENT && 495 if (err == -ENOENT &&
495 ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */ 496 ceph_vino(parent).ino != CEPH_INO_ROOT && /* no .snap in root dir */
496 strcmp(dentry->d_name.name, 497 strcmp(dentry->d_name.name,
497 client->mount_args->snapdir_name) == 0) { 498 fsc->mount_options->snapdir_name) == 0) {
498 struct inode *inode = ceph_get_snapdir(parent); 499 struct inode *inode = ceph_get_snapdir(parent);
499 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n", 500 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
500 dentry, dentry->d_name.len, dentry->d_name.name, inode); 501 dentry, dentry->d_name.len, dentry->d_name.name, inode);
@@ -539,8 +540,8 @@ static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
539static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, 540static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
540 struct nameidata *nd) 541 struct nameidata *nd)
541{ 542{
542 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 543 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
543 struct ceph_mds_client *mdsc = &client->mdsc; 544 struct ceph_mds_client *mdsc = fsc->mdsc;
544 struct ceph_mds_request *req; 545 struct ceph_mds_request *req;
545 int op; 546 int op;
546 int err; 547 int err;
@@ -572,7 +573,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
572 spin_lock(&dir->i_lock); 573 spin_lock(&dir->i_lock);
573 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags); 574 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
574 if (strncmp(dentry->d_name.name, 575 if (strncmp(dentry->d_name.name,
575 client->mount_args->snapdir_name, 576 fsc->mount_options->snapdir_name,
576 dentry->d_name.len) && 577 dentry->d_name.len) &&
577 !is_root_ceph_dentry(dir, dentry) && 578 !is_root_ceph_dentry(dir, dentry) &&
578 (ci->i_ceph_flags & CEPH_I_COMPLETE) && 579 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
@@ -629,8 +630,8 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
629static int ceph_mknod(struct inode *dir, struct dentry *dentry, 630static int ceph_mknod(struct inode *dir, struct dentry *dentry,
630 int mode, dev_t rdev) 631 int mode, dev_t rdev)
631{ 632{
632 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 633 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
633 struct ceph_mds_client *mdsc = &client->mdsc; 634 struct ceph_mds_client *mdsc = fsc->mdsc;
634 struct ceph_mds_request *req; 635 struct ceph_mds_request *req;
635 int err; 636 int err;
636 637
@@ -685,8 +686,8 @@ static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
685static int ceph_symlink(struct inode *dir, struct dentry *dentry, 686static int ceph_symlink(struct inode *dir, struct dentry *dentry,
686 const char *dest) 687 const char *dest)
687{ 688{
688 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 689 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
689 struct ceph_mds_client *mdsc = &client->mdsc; 690 struct ceph_mds_client *mdsc = fsc->mdsc;
690 struct ceph_mds_request *req; 691 struct ceph_mds_request *req;
691 int err; 692 int err;
692 693
@@ -716,8 +717,8 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
716 717
717static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode) 718static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
718{ 719{
719 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 720 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
720 struct ceph_mds_client *mdsc = &client->mdsc; 721 struct ceph_mds_client *mdsc = fsc->mdsc;
721 struct ceph_mds_request *req; 722 struct ceph_mds_request *req;
722 int err = -EROFS; 723 int err = -EROFS;
723 int op; 724 int op;
@@ -758,8 +759,8 @@ out:
758static int ceph_link(struct dentry *old_dentry, struct inode *dir, 759static int ceph_link(struct dentry *old_dentry, struct inode *dir,
759 struct dentry *dentry) 760 struct dentry *dentry)
760{ 761{
761 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 762 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
762 struct ceph_mds_client *mdsc = &client->mdsc; 763 struct ceph_mds_client *mdsc = fsc->mdsc;
763 struct ceph_mds_request *req; 764 struct ceph_mds_request *req;
764 int err; 765 int err;
765 766
@@ -813,8 +814,8 @@ static int drop_caps_for_unlink(struct inode *inode)
813 */ 814 */
814static int ceph_unlink(struct inode *dir, struct dentry *dentry) 815static int ceph_unlink(struct inode *dir, struct dentry *dentry)
815{ 816{
816 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 817 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
817 struct ceph_mds_client *mdsc = &client->mdsc; 818 struct ceph_mds_client *mdsc = fsc->mdsc;
818 struct inode *inode = dentry->d_inode; 819 struct inode *inode = dentry->d_inode;
819 struct ceph_mds_request *req; 820 struct ceph_mds_request *req;
820 int err = -EROFS; 821 int err = -EROFS;
@@ -854,8 +855,8 @@ out:
854static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, 855static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
855 struct inode *new_dir, struct dentry *new_dentry) 856 struct inode *new_dir, struct dentry *new_dentry)
856{ 857{
857 struct ceph_client *client = ceph_sb_to_client(old_dir->i_sb); 858 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
858 struct ceph_mds_client *mdsc = &client->mdsc; 859 struct ceph_mds_client *mdsc = fsc->mdsc;
859 struct ceph_mds_request *req; 860 struct ceph_mds_request *req;
860 int err; 861 int err;
861 862
@@ -1076,7 +1077,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1076 struct ceph_inode_info *ci = ceph_inode(inode); 1077 struct ceph_inode_info *ci = ceph_inode(inode);
1077 int left; 1078 int left;
1078 1079
1079 if (!ceph_test_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT)) 1080 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1080 return -EISDIR; 1081 return -EISDIR;
1081 1082
1082 if (!cf->dir_info) { 1083 if (!cf->dir_info) {
@@ -1177,7 +1178,7 @@ void ceph_dentry_lru_add(struct dentry *dn)
1177 dout("dentry_lru_add %p %p '%.*s'\n", di, dn, 1178 dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
1178 dn->d_name.len, dn->d_name.name); 1179 dn->d_name.len, dn->d_name.name);
1179 if (di) { 1180 if (di) {
1180 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; 1181 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1181 spin_lock(&mdsc->dentry_lru_lock); 1182 spin_lock(&mdsc->dentry_lru_lock);
1182 list_add_tail(&di->lru, &mdsc->dentry_lru); 1183 list_add_tail(&di->lru, &mdsc->dentry_lru);
1183 mdsc->num_dentry++; 1184 mdsc->num_dentry++;
@@ -1193,7 +1194,7 @@ void ceph_dentry_lru_touch(struct dentry *dn)
1193 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn, 1194 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
1194 dn->d_name.len, dn->d_name.name, di->offset); 1195 dn->d_name.len, dn->d_name.name, di->offset);
1195 if (di) { 1196 if (di) {
1196 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; 1197 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1197 spin_lock(&mdsc->dentry_lru_lock); 1198 spin_lock(&mdsc->dentry_lru_lock);
1198 list_move_tail(&di->lru, &mdsc->dentry_lru); 1199 list_move_tail(&di->lru, &mdsc->dentry_lru);
1199 spin_unlock(&mdsc->dentry_lru_lock); 1200 spin_unlock(&mdsc->dentry_lru_lock);
@@ -1208,7 +1209,7 @@ void ceph_dentry_lru_del(struct dentry *dn)
1208 dout("dentry_lru_del %p %p '%.*s'\n", di, dn, 1209 dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
1209 dn->d_name.len, dn->d_name.name); 1210 dn->d_name.len, dn->d_name.name);
1210 if (di) { 1211 if (di) {
1211 mdsc = &ceph_sb_to_client(dn->d_sb)->mdsc; 1212 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1212 spin_lock(&mdsc->dentry_lru_lock); 1213 spin_lock(&mdsc->dentry_lru_lock);
1213 list_del_init(&di->lru); 1214 list_del_init(&di->lru);
1214 mdsc->num_dentry--; 1215 mdsc->num_dentry--;
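
Every dir.c hunk above is the same mechanical substitution: the MDS client is no longer embedded in the client struct, so call sites stop taking the address of a member and instead follow a pointer. A minimal compilable sketch of the difference, using hypothetical stand-in types (the real ceph_fs_client/ceph_mds_client definitions live in super.h and mds_client.h, outside these hunks):

/* stand-in types, illustrative only -- not the real definitions */
struct mdsc { int dummy; };

struct old_client { struct mdsc mdsc; };   /* embedded member */
struct new_fsc    { struct mdsc *mdsc; };  /* separately allocated */

/* before: mdsc = &client->mdsc -- address of the embedded member */
static struct mdsc *old_get(struct old_client *c) { return &c->mdsc; }

/* after: mdsc = fsc->mdsc -- plain pointer dereference */
static struct mdsc *new_get(struct new_fsc *f) { return f->mdsc; }

The pointer form is what lets mds_client.c (further down in this patch) allocate and free the structure independently of the filesystem client's lifetime.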
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index e38423e82f2e..2297d9426992 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -1,10 +1,11 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/exportfs.h> 3#include <linux/exportfs.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <asm/unaligned.h> 5#include <asm/unaligned.h>
6 6
7#include "super.h" 7#include "super.h"
8#include "mds_client.h"
8 9
9/* 10/*
10 * NFS export support 11 * NFS export support
@@ -120,7 +121,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
120static struct dentry *__cfh_to_dentry(struct super_block *sb, 121static struct dentry *__cfh_to_dentry(struct super_block *sb,
121 struct ceph_nfs_confh *cfh) 122 struct ceph_nfs_confh *cfh)
122{ 123{
123 struct ceph_mds_client *mdsc = &ceph_sb_to_client(sb)->mdsc; 124 struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
124 struct inode *inode; 125 struct inode *inode;
125 struct dentry *dentry; 126 struct dentry *dentry;
126 struct ceph_vino vino; 127 struct ceph_vino vino;
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 66e4da6dba22..e77c28cf3690 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1,5 +1,6 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/module.h>
3#include <linux/sched.h> 4#include <linux/sched.h>
4#include <linux/slab.h> 5#include <linux/slab.h>
5#include <linux/file.h> 6#include <linux/file.h>
@@ -38,8 +39,8 @@
38static struct ceph_mds_request * 39static struct ceph_mds_request *
39prepare_open_request(struct super_block *sb, int flags, int create_mode) 40prepare_open_request(struct super_block *sb, int flags, int create_mode)
40{ 41{
41 struct ceph_client *client = ceph_sb_to_client(sb); 42 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
42 struct ceph_mds_client *mdsc = &client->mdsc; 43 struct ceph_mds_client *mdsc = fsc->mdsc;
43 struct ceph_mds_request *req; 44 struct ceph_mds_request *req;
44 int want_auth = USE_ANY_MDS; 45 int want_auth = USE_ANY_MDS;
45 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN; 46 int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
@@ -117,8 +118,8 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
117int ceph_open(struct inode *inode, struct file *file) 118int ceph_open(struct inode *inode, struct file *file)
118{ 119{
119 struct ceph_inode_info *ci = ceph_inode(inode); 120 struct ceph_inode_info *ci = ceph_inode(inode);
120 struct ceph_client *client = ceph_sb_to_client(inode->i_sb); 121 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
121 struct ceph_mds_client *mdsc = &client->mdsc; 122 struct ceph_mds_client *mdsc = fsc->mdsc;
122 struct ceph_mds_request *req; 123 struct ceph_mds_request *req;
123 struct ceph_file_info *cf = file->private_data; 124 struct ceph_file_info *cf = file->private_data;
124 struct inode *parent_inode = file->f_dentry->d_parent->d_inode; 125 struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
@@ -216,8 +217,8 @@ struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
216 struct nameidata *nd, int mode, 217 struct nameidata *nd, int mode,
217 int locked_dir) 218 int locked_dir)
218{ 219{
219 struct ceph_client *client = ceph_sb_to_client(dir->i_sb); 220 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
220 struct ceph_mds_client *mdsc = &client->mdsc; 221 struct ceph_mds_client *mdsc = fsc->mdsc;
221 struct file *file = nd->intent.open.file; 222 struct file *file = nd->intent.open.file;
222 struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry); 223 struct inode *parent_inode = get_dentry_parent_inode(file->f_dentry);
223 struct ceph_mds_request *req; 224 struct ceph_mds_request *req;
@@ -270,163 +271,6 @@ int ceph_release(struct inode *inode, struct file *file)
270} 271}
271 272
272/* 273/*
273 * build a vector of user pages
274 */
275static struct page **get_direct_page_vector(const char __user *data,
276 int num_pages,
277 loff_t off, size_t len)
278{
279 struct page **pages;
280 int rc;
281
282 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
283 if (!pages)
284 return ERR_PTR(-ENOMEM);
285
286 down_read(&current->mm->mmap_sem);
287 rc = get_user_pages(current, current->mm, (unsigned long)data,
288 num_pages, 0, 0, pages, NULL);
289 up_read(&current->mm->mmap_sem);
290 if (rc < 0)
291 goto fail;
292 return pages;
293
294fail:
295 kfree(pages);
296 return ERR_PTR(rc);
297}
298
299static void put_page_vector(struct page **pages, int num_pages)
300{
301 int i;
302
303 for (i = 0; i < num_pages; i++)
304 put_page(pages[i]);
305 kfree(pages);
306}
307
308void ceph_release_page_vector(struct page **pages, int num_pages)
309{
310 int i;
311
312 for (i = 0; i < num_pages; i++)
313 __free_pages(pages[i], 0);
314 kfree(pages);
315}
316
317/*
318 * allocate a vector of new pages
319 */
320static struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
321{
322 struct page **pages;
323 int i;
324
325 pages = kmalloc(sizeof(*pages) * num_pages, flags);
326 if (!pages)
327 return ERR_PTR(-ENOMEM);
328 for (i = 0; i < num_pages; i++) {
329 pages[i] = __page_cache_alloc(flags);
330 if (pages[i] == NULL) {
331 ceph_release_page_vector(pages, i);
332 return ERR_PTR(-ENOMEM);
333 }
334 }
335 return pages;
336}
337
338/*
339 * copy user data into a page vector
340 */
341static int copy_user_to_page_vector(struct page **pages,
342 const char __user *data,
343 loff_t off, size_t len)
344{
345 int i = 0;
346 int po = off & ~PAGE_CACHE_MASK;
347 int left = len;
348 int l, bad;
349
350 while (left > 0) {
351 l = min_t(int, PAGE_CACHE_SIZE-po, left);
352 bad = copy_from_user(page_address(pages[i]) + po, data, l);
353 if (bad == l)
354 return -EFAULT;
355 data += l - bad;
356 left -= l - bad;
357 po += l - bad;
358 if (po == PAGE_CACHE_SIZE) {
359 po = 0;
360 i++;
361 }
362 }
363 return len;
364}
365
366/*
367 * copy user data from a page vector into a user pointer
368 */
369static int copy_page_vector_to_user(struct page **pages, char __user *data,
370 loff_t off, size_t len)
371{
372 int i = 0;
373 int po = off & ~PAGE_CACHE_MASK;
374 int left = len;
375 int l, bad;
376
377 while (left > 0) {
378 l = min_t(int, left, PAGE_CACHE_SIZE-po);
379 bad = copy_to_user(data, page_address(pages[i]) + po, l);
380 if (bad == l)
381 return -EFAULT;
382 data += l - bad;
383 left -= l - bad;
384 if (po) {
385 po += l - bad;
386 if (po == PAGE_CACHE_SIZE)
387 po = 0;
388 }
389 i++;
390 }
391 return len;
392}
393
394/*
395 * Zero an extent within a page vector. Offset is relative to the
396 * start of the first page.
397 */
398static void zero_page_vector_range(int off, int len, struct page **pages)
399{
400 int i = off >> PAGE_CACHE_SHIFT;
401
402 off &= ~PAGE_CACHE_MASK;
403
404 dout("zero_page_vector_page %u~%u\n", off, len);
405
406 /* leading partial page? */
407 if (off) {
408 int end = min((int)PAGE_CACHE_SIZE, off + len);
409 dout("zeroing %d %p head from %d\n", i, pages[i],
410 (int)off);
411 zero_user_segment(pages[i], off, end);
412 len -= (end - off);
413 i++;
414 }
415 while (len >= PAGE_CACHE_SIZE) {
416 dout("zeroing %d %p len=%d\n", i, pages[i], len);
417 zero_user_segment(pages[i], 0, PAGE_CACHE_SIZE);
418 len -= PAGE_CACHE_SIZE;
419 i++;
420 }
421 /* trailing partial page? */
422 if (len) {
423 dout("zeroing %d %p tail to %d\n", i, pages[i], (int)len);
424 zero_user_segment(pages[i], 0, len);
425 }
426}
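
The helper above (re-exported from libceph as ceph_zero_page_vector_range) is the standard head/body/tail decomposition of a byte range over fixed-size pages: a leading partial page, any number of whole pages, then a trailing partial page. A self-contained userspace sketch of the same partition, with PAGE standing in for PAGE_CACHE_SIZE:

#include <stdio.h>

#define PAGE 4096

int main(void)
{
	int off = 1000, len = 10000, i = off / PAGE;

	off %= PAGE;
	if (off) {                        /* leading partial page */
		int end = (off + len < PAGE) ? off + len : PAGE;
		printf("page %d: zero [%d,%d)\n", i++, off, end);
		len -= end - off;
	}
	while (len >= PAGE) {             /* whole pages */
		printf("page %d: zero [0,%d)\n", i++, PAGE);
		len -= PAGE;
	}
	if (len)                          /* trailing partial page */
		printf("page %d: zero [0,%d)\n", i, len);
	return 0;
}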
427
428
429/*
430 * Read a range of bytes striped over one or more objects. Iterate over 274 * Read a range of bytes striped over one or more objects. Iterate over
431 * objects we stripe over. (That's not atomic, but good enough for now.) 275 * objects we stripe over. (That's not atomic, but good enough for now.)
432 * 276 *
@@ -438,7 +282,7 @@ static int striped_read(struct inode *inode,
438 struct page **pages, int num_pages, 282 struct page **pages, int num_pages,
439 int *checkeof) 283 int *checkeof)
440{ 284{
441 struct ceph_client *client = ceph_inode_to_client(inode); 285 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
442 struct ceph_inode_info *ci = ceph_inode(inode); 286 struct ceph_inode_info *ci = ceph_inode(inode);
443 u64 pos, this_len; 287 u64 pos, this_len;
444 int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */ 288 int page_off = off & ~PAGE_CACHE_MASK; /* first byte's offset in page */
@@ -459,7 +303,7 @@ static int striped_read(struct inode *inode,
459 303
460more: 304more:
461 this_len = left; 305 this_len = left;
462 ret = ceph_osdc_readpages(&client->osdc, ceph_vino(inode), 306 ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
463 &ci->i_layout, pos, &this_len, 307 &ci->i_layout, pos, &this_len,
464 ci->i_truncate_seq, 308 ci->i_truncate_seq,
465 ci->i_truncate_size, 309 ci->i_truncate_size,
@@ -477,8 +321,8 @@ more:
477 321
478 if (read < pos - off) { 322 if (read < pos - off) {
479 dout(" zero gap %llu to %llu\n", off + read, pos); 323 dout(" zero gap %llu to %llu\n", off + read, pos);
480 zero_page_vector_range(page_off + read, 324 ceph_zero_page_vector_range(page_off + read,
481 pos - off - read, pages); 325 pos - off - read, pages);
482 } 326 }
483 pos += ret; 327 pos += ret;
484 read = pos - off; 328 read = pos - off;
@@ -495,8 +339,8 @@ more:
495 /* was original extent fully inside i_size? */ 339 /* was original extent fully inside i_size? */
496 if (pos + left <= inode->i_size) { 340 if (pos + left <= inode->i_size) {
497 dout("zero tail\n"); 341 dout("zero tail\n");
498 zero_page_vector_range(page_off + read, len - read, 342 ceph_zero_page_vector_range(page_off + read, len - read,
499 pages); 343 pages);
500 read = len; 344 read = len;
501 goto out; 345 goto out;
502 } 346 }
@@ -531,7 +375,7 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
531 (file->f_flags & O_DIRECT) ? "O_DIRECT" : ""); 375 (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
532 376
533 if (file->f_flags & O_DIRECT) { 377 if (file->f_flags & O_DIRECT) {
534 pages = get_direct_page_vector(data, num_pages, off, len); 378 pages = ceph_get_direct_page_vector(data, num_pages, off, len);
535 379
536 /* 380 /*
537 * flush any page cache pages in this range. this 381 * flush any page cache pages in this range. this
@@ -552,13 +396,13 @@ static ssize_t ceph_sync_read(struct file *file, char __user *data,
552 ret = striped_read(inode, off, len, pages, num_pages, checkeof); 396 ret = striped_read(inode, off, len, pages, num_pages, checkeof);
553 397
554 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0) 398 if (ret >= 0 && (file->f_flags & O_DIRECT) == 0)
555 ret = copy_page_vector_to_user(pages, data, off, ret); 399 ret = ceph_copy_page_vector_to_user(pages, data, off, ret);
556 if (ret >= 0) 400 if (ret >= 0)
557 *poff = off + ret; 401 *poff = off + ret;
558 402
559done: 403done:
560 if (file->f_flags & O_DIRECT) 404 if (file->f_flags & O_DIRECT)
561 put_page_vector(pages, num_pages); 405 ceph_put_page_vector(pages, num_pages);
562 else 406 else
563 ceph_release_page_vector(pages, num_pages); 407 ceph_release_page_vector(pages, num_pages);
564 dout("sync_read result %d\n", ret); 408 dout("sync_read result %d\n", ret);
@@ -594,7 +438,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
594{ 438{
595 struct inode *inode = file->f_dentry->d_inode; 439 struct inode *inode = file->f_dentry->d_inode;
596 struct ceph_inode_info *ci = ceph_inode(inode); 440 struct ceph_inode_info *ci = ceph_inode(inode);
597 struct ceph_client *client = ceph_inode_to_client(inode); 441 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
598 struct ceph_osd_request *req; 442 struct ceph_osd_request *req;
599 struct page **pages; 443 struct page **pages;
600 int num_pages; 444 int num_pages;
@@ -642,7 +486,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
642 */ 486 */
643more: 487more:
644 len = left; 488 len = left;
645 req = ceph_osdc_new_request(&client->osdc, &ci->i_layout, 489 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
646 ceph_vino(inode), pos, &len, 490 ceph_vino(inode), pos, &len,
647 CEPH_OSD_OP_WRITE, flags, 491 CEPH_OSD_OP_WRITE, flags,
648 ci->i_snap_realm->cached_context, 492 ci->i_snap_realm->cached_context,
@@ -655,7 +499,7 @@ more:
655 num_pages = calc_pages_for(pos, len); 499 num_pages = calc_pages_for(pos, len);
656 500
657 if (file->f_flags & O_DIRECT) { 501 if (file->f_flags & O_DIRECT) {
658 pages = get_direct_page_vector(data, num_pages, pos, len); 502 pages = ceph_get_direct_page_vector(data, num_pages, pos, len);
659 if (IS_ERR(pages)) { 503 if (IS_ERR(pages)) {
660 ret = PTR_ERR(pages); 504 ret = PTR_ERR(pages);
661 goto out; 505 goto out;
@@ -673,7 +517,7 @@ more:
673 ret = PTR_ERR(pages); 517 ret = PTR_ERR(pages);
674 goto out; 518 goto out;
675 } 519 }
676 ret = copy_user_to_page_vector(pages, data, pos, len); 520 ret = ceph_copy_user_to_page_vector(pages, data, pos, len);
677 if (ret < 0) { 521 if (ret < 0) {
678 ceph_release_page_vector(pages, num_pages); 522 ceph_release_page_vector(pages, num_pages);
679 goto out; 523 goto out;
@@ -689,7 +533,7 @@ more:
689 req->r_num_pages = num_pages; 533 req->r_num_pages = num_pages;
690 req->r_inode = inode; 534 req->r_inode = inode;
691 535
692 ret = ceph_osdc_start_request(&client->osdc, req, false); 536 ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
693 if (!ret) { 537 if (!ret) {
694 if (req->r_safe_callback) { 538 if (req->r_safe_callback) {
695 /* 539 /*
@@ -701,11 +545,11 @@ more:
701 spin_unlock(&ci->i_unsafe_lock); 545 spin_unlock(&ci->i_unsafe_lock);
702 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); 546 ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
703 } 547 }
704 ret = ceph_osdc_wait_request(&client->osdc, req); 548 ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
705 } 549 }
706 550
707 if (file->f_flags & O_DIRECT) 551 if (file->f_flags & O_DIRECT)
708 put_page_vector(pages, num_pages); 552 ceph_put_page_vector(pages, num_pages);
709 else if (file->f_flags & O_SYNC) 553 else if (file->f_flags & O_SYNC)
710 ceph_release_page_vector(pages, num_pages); 554 ceph_release_page_vector(pages, num_pages);
711 555
@@ -814,7 +658,8 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov,
814 struct ceph_file_info *fi = file->private_data; 658 struct ceph_file_info *fi = file->private_data;
815 struct inode *inode = file->f_dentry->d_inode; 659 struct inode *inode = file->f_dentry->d_inode;
816 struct ceph_inode_info *ci = ceph_inode(inode); 660 struct ceph_inode_info *ci = ceph_inode(inode);
817 struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc; 661 struct ceph_osd_client *osdc =
662 &ceph_sb_to_client(inode->i_sb)->client->osdc;
818 loff_t endoff = pos + iov->iov_len; 663 loff_t endoff = pos + iov->iov_len;
819 int want, got = 0; 664 int want, got = 0;
820 int ret, err; 665 int ret, err;
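
Both sync paths above split a file offset into a page index plus an in-page offset (off & ~PAGE_CACHE_MASK) and size their vectors with calc_pages_for(pos, len). A standalone sketch of that arithmetic, assuming 4 KB pages for the worked numbers (the extent-rounding formula is reproduced from the kernel helper, not from these hunks):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	unsigned long off = 5000, len = 9000;

	unsigned long first = off >> PAGE_SHIFT;       /* index of first page hit */
	unsigned long po    = off & (PAGE_SIZE - 1);   /* offset inside that page */
	/* round the extent [off, off+len) out to page boundaries,
	 * then count the pages it covers */
	unsigned long pages = ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT)
			      - (off >> PAGE_SHIFT);

	printf("first=%lu in-page=%lu pages=%lu\n", first, po, pages);
	return 0;	/* prints: first=1 in-page=904 pages=3 */
}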
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 62377ec37edf..1d6a45b5a04c 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/module.h> 3#include <linux/module.h>
4#include <linux/fs.h> 4#include <linux/fs.h>
@@ -13,7 +13,8 @@
13#include <linux/pagevec.h> 13#include <linux/pagevec.h>
14 14
15#include "super.h" 15#include "super.h"
16#include "decode.h" 16#include "mds_client.h"
17#include <linux/ceph/decode.h>
17 18
18/* 19/*
19 * Ceph inode operations 20 * Ceph inode operations
@@ -384,7 +385,7 @@ void ceph_destroy_inode(struct inode *inode)
384 */ 385 */
385 if (ci->i_snap_realm) { 386 if (ci->i_snap_realm) {
386 struct ceph_mds_client *mdsc = 387 struct ceph_mds_client *mdsc =
387 &ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc; 388 ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
388 struct ceph_snap_realm *realm = ci->i_snap_realm; 389 struct ceph_snap_realm *realm = ci->i_snap_realm;
389 390
390 dout(" dropping residual ref to snap realm %p\n", realm); 391 dout(" dropping residual ref to snap realm %p\n", realm);
@@ -685,7 +686,7 @@ static int fill_inode(struct inode *inode,
685 } 686 }
686 687
687 /* it may be better to set st_size in getattr instead? */ 688 /* it may be better to set st_size in getattr instead? */
688 if (ceph_test_opt(ceph_sb_to_client(inode->i_sb), RBYTES)) 689 if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), RBYTES))
689 inode->i_size = ci->i_rbytes; 690 inode->i_size = ci->i_rbytes;
690 break; 691 break;
691 default: 692 default:
@@ -901,7 +902,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
901 struct inode *in = NULL; 902 struct inode *in = NULL;
902 struct ceph_mds_reply_inode *ininfo; 903 struct ceph_mds_reply_inode *ininfo;
903 struct ceph_vino vino; 904 struct ceph_vino vino;
904 struct ceph_client *client = ceph_sb_to_client(sb); 905 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
905 int i = 0; 906 int i = 0;
906 int err = 0; 907 int err = 0;
907 908
@@ -965,7 +966,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
965 */ 966 */
966 if (rinfo->head->is_dentry && !req->r_aborted && 967 if (rinfo->head->is_dentry && !req->r_aborted &&
967 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, 968 (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
968 client->mount_args->snapdir_name, 969 fsc->mount_options->snapdir_name,
969 req->r_dentry->d_name.len))) { 970 req->r_dentry->d_name.len))) {
970 /* 971 /*
971 * lookup link rename : null -> possibly existing inode 972 * lookup link rename : null -> possibly existing inode
@@ -1533,7 +1534,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1533 struct inode *parent_inode = dentry->d_parent->d_inode; 1534 struct inode *parent_inode = dentry->d_parent->d_inode;
1534 const unsigned int ia_valid = attr->ia_valid; 1535 const unsigned int ia_valid = attr->ia_valid;
1535 struct ceph_mds_request *req; 1536 struct ceph_mds_request *req;
1536 struct ceph_mds_client *mdsc = &ceph_sb_to_client(dentry->d_sb)->mdsc; 1537 struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
1537 int issued; 1538 int issued;
1538 int release = 0, dirtied = 0; 1539 int release = 0, dirtied = 0;
1539 int mask = 0; 1540 int mask = 0;
@@ -1728,8 +1729,8 @@ out:
1728 */ 1729 */
1729int ceph_do_getattr(struct inode *inode, int mask) 1730int ceph_do_getattr(struct inode *inode, int mask)
1730{ 1731{
1731 struct ceph_client *client = ceph_sb_to_client(inode->i_sb); 1732 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
1732 struct ceph_mds_client *mdsc = &client->mdsc; 1733 struct ceph_mds_client *mdsc = fsc->mdsc;
1733 struct ceph_mds_request *req; 1734 struct ceph_mds_request *req;
1734 int err; 1735 int err;
1735 1736
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 76e307d2aba1..899578b0c46b 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -1,8 +1,10 @@
1#include <linux/in.h> 1#include <linux/in.h>
2 2
3#include "ioctl.h"
4#include "super.h" 3#include "super.h"
5#include "ceph_debug.h" 4#include "mds_client.h"
5#include <linux/ceph/ceph_debug.h>
6
7#include "ioctl.h"
6 8
7 9
8/* 10/*
@@ -37,7 +39,7 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
37{ 39{
38 struct inode *inode = file->f_dentry->d_inode; 40 struct inode *inode = file->f_dentry->d_inode;
39 struct inode *parent_inode = file->f_dentry->d_parent->d_inode; 41 struct inode *parent_inode = file->f_dentry->d_parent->d_inode;
40 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 42 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
41 struct ceph_mds_request *req; 43 struct ceph_mds_request *req;
42 struct ceph_ioctl_layout l; 44 struct ceph_ioctl_layout l;
43 int err, i; 45 int err, i;
@@ -98,7 +100,8 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
98 struct ceph_ioctl_dataloc dl; 100 struct ceph_ioctl_dataloc dl;
99 struct inode *inode = file->f_dentry->d_inode; 101 struct inode *inode = file->f_dentry->d_inode;
100 struct ceph_inode_info *ci = ceph_inode(inode); 102 struct ceph_inode_info *ci = ceph_inode(inode);
101 struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->osdc; 103 struct ceph_osd_client *osdc =
104 &ceph_sb_to_client(inode->i_sb)->client->osdc;
102 u64 len = 1, olen; 105 u64 len = 1, olen;
103 u64 tmp; 106 u64 tmp;
104 struct ceph_object_layout ol; 107 struct ceph_object_layout ol;
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ff4e753aae92..087afe9ca367 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -1,11 +1,11 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/file.h> 3#include <linux/file.h>
4#include <linux/namei.h> 4#include <linux/namei.h>
5 5
6#include "super.h" 6#include "super.h"
7#include "mds_client.h" 7#include "mds_client.h"
8#include "pagelist.h" 8#include <linux/ceph/pagelist.h>
9 9
10/** 10/**
11 * Implement fcntl and flock locking functions. 11 * Implement fcntl and flock locking functions.
@@ -16,7 +16,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
16{ 16{
17 struct inode *inode = file->f_dentry->d_inode; 17 struct inode *inode = file->f_dentry->d_inode;
18 struct ceph_mds_client *mdsc = 18 struct ceph_mds_client *mdsc =
19 &ceph_sb_to_client(inode->i_sb)->mdsc; 19 ceph_sb_to_client(inode->i_sb)->mdsc;
20 struct ceph_mds_request *req; 20 struct ceph_mds_request *req;
21 int err; 21 int err;
22 22
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index fad95f8f2608..33568239a08e 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1,17 +1,20 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/wait.h> 3#include <linux/wait.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/sched.h> 5#include <linux/sched.h>
6#include <linux/smp_lock.h> 6#include <linux/smp_lock.h>
7#include <linux/debugfs.h>
8#include <linux/seq_file.h>
7 9
8#include "mds_client.h"
9#include "mon_client.h"
10#include "super.h" 10#include "super.h"
11#include "messenger.h" 11#include "mds_client.h"
12#include "decode.h" 12
13#include "auth.h" 13#include <linux/ceph/messenger.h>
14#include "pagelist.h" 14#include <linux/ceph/decode.h>
15#include <linux/ceph/pagelist.h>
16#include <linux/ceph/auth.h>
17#include <linux/ceph/debugfs.h>
15 18
16/* 19/*
17 * A cluster of MDS (metadata server) daemons is responsible for 20 * A cluster of MDS (metadata server) daemons is responsible for
@@ -286,8 +289,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
286 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); 289 atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
287 if (atomic_dec_and_test(&s->s_ref)) { 290 if (atomic_dec_and_test(&s->s_ref)) {
288 if (s->s_authorizer) 291 if (s->s_authorizer)
289 s->s_mdsc->client->monc.auth->ops->destroy_authorizer( 292 s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer(
290 s->s_mdsc->client->monc.auth, s->s_authorizer); 293 s->s_mdsc->fsc->client->monc.auth,
294 s->s_authorizer);
291 kfree(s); 295 kfree(s);
292 } 296 }
293} 297}
@@ -344,7 +348,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
344 s->s_seq = 0; 348 s->s_seq = 0;
345 mutex_init(&s->s_mutex); 349 mutex_init(&s->s_mutex);
346 350
347 ceph_con_init(mdsc->client->msgr, &s->s_con); 351 ceph_con_init(mdsc->fsc->client->msgr, &s->s_con);
348 s->s_con.private = s; 352 s->s_con.private = s;
349 s->s_con.ops = &mds_con_ops; 353 s->s_con.ops = &mds_con_ops;
350 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS; 354 s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
@@ -599,7 +603,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc,
599 } else if (req->r_dentry) { 603 } else if (req->r_dentry) {
600 struct inode *dir = req->r_dentry->d_parent->d_inode; 604 struct inode *dir = req->r_dentry->d_parent->d_inode;
601 605
602 if (dir->i_sb != mdsc->client->sb) { 606 if (dir->i_sb != mdsc->fsc->sb) {
603 /* not this fs! */ 607 /* not this fs! */
604 inode = req->r_dentry->d_inode; 608 inode = req->r_dentry->d_inode;
605 } else if (ceph_snap(dir) != CEPH_NOSNAP) { 609 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
@@ -884,7 +888,7 @@ static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
884 __ceph_remove_cap(cap); 888 __ceph_remove_cap(cap);
885 if (!__ceph_is_any_real_caps(ci)) { 889 if (!__ceph_is_any_real_caps(ci)) {
886 struct ceph_mds_client *mdsc = 890 struct ceph_mds_client *mdsc =
887 &ceph_sb_to_client(inode->i_sb)->mdsc; 891 ceph_sb_to_client(inode->i_sb)->mdsc;
888 892
889 spin_lock(&mdsc->cap_dirty_lock); 893 spin_lock(&mdsc->cap_dirty_lock);
890 if (!list_empty(&ci->i_dirty_item)) { 894 if (!list_empty(&ci->i_dirty_item)) {
@@ -1146,7 +1150,7 @@ int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
1146 struct ceph_msg *msg, *partial = NULL; 1150 struct ceph_msg *msg, *partial = NULL;
1147 struct ceph_mds_cap_release *head; 1151 struct ceph_mds_cap_release *head;
1148 int err = -ENOMEM; 1152 int err = -ENOMEM;
1149 int extra = mdsc->client->mount_args->cap_release_safety; 1153 int extra = mdsc->fsc->mount_options->cap_release_safety;
1150 int num; 1154 int num;
1151 1155
1152 dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds, 1156 dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
@@ -2085,7 +2089,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2085 2089
2086 /* insert trace into our cache */ 2090 /* insert trace into our cache */
2087 mutex_lock(&req->r_fill_mutex); 2091 mutex_lock(&req->r_fill_mutex);
2088 err = ceph_fill_trace(mdsc->client->sb, req, req->r_session); 2092 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
2089 if (err == 0) { 2093 if (err == 0) {
2090 if (result == 0 && rinfo->dir_nr) 2094 if (result == 0 && rinfo->dir_nr)
2091 ceph_readdir_prepopulate(req, req->r_session); 2095 ceph_readdir_prepopulate(req, req->r_session);
@@ -2613,7 +2617,7 @@ static void handle_lease(struct ceph_mds_client *mdsc,
2613 struct ceph_mds_session *session, 2617 struct ceph_mds_session *session,
2614 struct ceph_msg *msg) 2618 struct ceph_msg *msg)
2615{ 2619{
2616 struct super_block *sb = mdsc->client->sb; 2620 struct super_block *sb = mdsc->fsc->sb;
2617 struct inode *inode; 2621 struct inode *inode;
2618 struct ceph_inode_info *ci; 2622 struct ceph_inode_info *ci;
2619 struct dentry *parent, *dentry; 2623 struct dentry *parent, *dentry;
@@ -2891,10 +2895,16 @@ static void delayed_work(struct work_struct *work)
2891 schedule_delayed(mdsc); 2895 schedule_delayed(mdsc);
2892} 2896}
2893 2897
2898int ceph_mdsc_init(struct ceph_fs_client *fsc)
2894 2899
2895int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2896{ 2900{
2897 mdsc->client = client; 2901 struct ceph_mds_client *mdsc;
2902
2903 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
2904 if (!mdsc)
2905 return -ENOMEM;
2906 mdsc->fsc = fsc;
2907 fsc->mdsc = mdsc;
2898 mutex_init(&mdsc->mutex); 2908 mutex_init(&mdsc->mutex);
2899 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); 2909 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
2900 if (mdsc->mdsmap == NULL) 2910 if (mdsc->mdsmap == NULL)
@@ -2927,7 +2937,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2927 INIT_LIST_HEAD(&mdsc->dentry_lru); 2937 INIT_LIST_HEAD(&mdsc->dentry_lru);
2928 2938
2929 ceph_caps_init(mdsc); 2939 ceph_caps_init(mdsc);
2930 ceph_adjust_min_caps(mdsc, client->min_caps); 2940 ceph_adjust_min_caps(mdsc, fsc->min_caps);
2931 2941
2932 return 0; 2942 return 0;
2933} 2943}
@@ -2939,7 +2949,7 @@ int ceph_mdsc_init(struct ceph_mds_client *mdsc, struct ceph_client *client)
2939static void wait_requests(struct ceph_mds_client *mdsc) 2949static void wait_requests(struct ceph_mds_client *mdsc)
2940{ 2950{
2941 struct ceph_mds_request *req; 2951 struct ceph_mds_request *req;
2942 struct ceph_client *client = mdsc->client; 2952 struct ceph_fs_client *fsc = mdsc->fsc;
2943 2953
2944 mutex_lock(&mdsc->mutex); 2954 mutex_lock(&mdsc->mutex);
2945 if (__get_oldest_req(mdsc)) { 2955 if (__get_oldest_req(mdsc)) {
@@ -2947,7 +2957,7 @@ static void wait_requests(struct ceph_mds_client *mdsc)
2947 2957
2948 dout("wait_requests waiting for requests\n"); 2958 dout("wait_requests waiting for requests\n");
2949 wait_for_completion_timeout(&mdsc->safe_umount_waiters, 2959 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
2950 client->mount_args->mount_timeout * HZ); 2960 fsc->client->options->mount_timeout * HZ);
2951 2961
2952 /* tear down remaining requests */ 2962 /* tear down remaining requests */
2953 mutex_lock(&mdsc->mutex); 2963 mutex_lock(&mdsc->mutex);
@@ -3030,7 +3040,7 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
3030{ 3040{
3031 u64 want_tid, want_flush; 3041 u64 want_tid, want_flush;
3032 3042
3033 if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN) 3043 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3034 return; 3044 return;
3035 3045
3036 dout("sync\n"); 3046 dout("sync\n");
@@ -3053,7 +3063,7 @@ bool done_closing_sessions(struct ceph_mds_client *mdsc)
3053{ 3063{
3054 int i, n = 0; 3064 int i, n = 0;
3055 3065
3056 if (mdsc->client->mount_state == CEPH_MOUNT_SHUTDOWN) 3066 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
3057 return true; 3067 return true;
3058 3068
3059 mutex_lock(&mdsc->mutex); 3069 mutex_lock(&mdsc->mutex);
@@ -3071,8 +3081,8 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3071{ 3081{
3072 struct ceph_mds_session *session; 3082 struct ceph_mds_session *session;
3073 int i; 3083 int i;
3074 struct ceph_client *client = mdsc->client; 3084 struct ceph_fs_client *fsc = mdsc->fsc;
3075 unsigned long timeout = client->mount_args->mount_timeout * HZ; 3085 unsigned long timeout = fsc->client->options->mount_timeout * HZ;
3076 3086
3077 dout("close_sessions\n"); 3087 dout("close_sessions\n");
3078 3088
@@ -3119,7 +3129,7 @@ void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
3119 dout("stopped\n"); 3129 dout("stopped\n");
3120} 3130}
3121 3131
3122void ceph_mdsc_stop(struct ceph_mds_client *mdsc) 3132static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3123{ 3133{
3124 dout("stop\n"); 3134 dout("stop\n");
3125 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ 3135 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
@@ -3129,6 +3139,15 @@ void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
3129 ceph_caps_finalize(mdsc); 3139 ceph_caps_finalize(mdsc);
3130} 3140}
3131 3141
3142void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
3143{
3144 struct ceph_mds_client *mdsc = fsc->mdsc;
3145
3146 ceph_mdsc_stop(mdsc);
3147 fsc->mdsc = NULL;
3148 kfree(mdsc);
3149}
3150
3132 3151
3133/* 3152/*
3134 * handle mds map update. 3153 * handle mds map update.
@@ -3145,14 +3164,14 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3145 3164
3146 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad); 3165 ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
3147 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 3166 ceph_decode_copy(&p, &fsid, sizeof(fsid));
3148 if (ceph_check_fsid(mdsc->client, &fsid) < 0) 3167 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
3149 return; 3168 return;
3150 epoch = ceph_decode_32(&p); 3169 epoch = ceph_decode_32(&p);
3151 maplen = ceph_decode_32(&p); 3170 maplen = ceph_decode_32(&p);
3152 dout("handle_map epoch %u len %d\n", epoch, (int)maplen); 3171 dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
3153 3172
3154 /* do we need it? */ 3173 /* do we need it? */
3155 ceph_monc_got_mdsmap(&mdsc->client->monc, epoch); 3174 ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
3156 mutex_lock(&mdsc->mutex); 3175 mutex_lock(&mdsc->mutex);
3157 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { 3176 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
3158 dout("handle_map epoch %u <= our %u\n", 3177 dout("handle_map epoch %u <= our %u\n",
@@ -3176,7 +3195,7 @@ void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
3176 } else { 3195 } else {
3177 mdsc->mdsmap = newmap; /* first mds map */ 3196 mdsc->mdsmap = newmap; /* first mds map */
3178 } 3197 }
3179 mdsc->client->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; 3198 mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;
3180 3199
3181 __wake_requests(mdsc, &mdsc->waiting_for_map); 3200 __wake_requests(mdsc, &mdsc->waiting_for_map);
3182 3201
@@ -3277,7 +3296,7 @@ static int get_authorizer(struct ceph_connection *con,
3277{ 3296{
3278 struct ceph_mds_session *s = con->private; 3297 struct ceph_mds_session *s = con->private;
3279 struct ceph_mds_client *mdsc = s->s_mdsc; 3298 struct ceph_mds_client *mdsc = s->s_mdsc;
3280 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3299 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3281 int ret = 0; 3300 int ret = 0;
3282 3301
3283 if (force_new && s->s_authorizer) { 3302 if (force_new && s->s_authorizer) {
@@ -3311,7 +3330,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len)
3311{ 3330{
3312 struct ceph_mds_session *s = con->private; 3331 struct ceph_mds_session *s = con->private;
3313 struct ceph_mds_client *mdsc = s->s_mdsc; 3332 struct ceph_mds_client *mdsc = s->s_mdsc;
3314 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3333 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3315 3334
3316 return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len); 3335 return ac->ops->verify_authorizer_reply(ac, s->s_authorizer, len);
3317} 3336}
@@ -3320,12 +3339,12 @@ static int invalidate_authorizer(struct ceph_connection *con)
3320{ 3339{
3321 struct ceph_mds_session *s = con->private; 3340 struct ceph_mds_session *s = con->private;
3322 struct ceph_mds_client *mdsc = s->s_mdsc; 3341 struct ceph_mds_client *mdsc = s->s_mdsc;
3323 struct ceph_auth_client *ac = mdsc->client->monc.auth; 3342 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
3324 3343
3325 if (ac->ops->invalidate_authorizer) 3344 if (ac->ops->invalidate_authorizer)
3326 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); 3345 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
3327 3346
3328 return ceph_monc_validate_auth(&mdsc->client->monc); 3347 return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
3329} 3348}
3330 3349
3331static const struct ceph_connection_operations mds_con_ops = { 3350static const struct ceph_connection_operations mds_con_ops = {
@@ -3338,7 +3357,4 @@ static const struct ceph_connection_operations mds_con_ops = {
3338 .peer_reset = peer_reset, 3357 .peer_reset = peer_reset,
3339}; 3358};
3340 3359
3341
3342
3343
3344/* eof */ 3360/* eof */
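
The init/stop hunks above change ownership of the mds client: ceph_mdsc_init() now allocates it and wires mutual pointers, and the new ceph_mdsc_destroy() stops and frees it. A simplified userspace sketch of the resulting pairing (error handling and the intervening teardown steps elided):

#include <stdlib.h>

/* simplified stand-ins for ceph_fs_client / ceph_mds_client */
struct fsc;
struct mdsc { struct fsc *fsc; };
struct fsc  { struct mdsc *mdsc; };

static int mdsc_init(struct fsc *f)
{
	struct mdsc *m = calloc(1, sizeof(*m));  /* kzalloc(..., GFP_NOFS) */
	if (!m)
		return -1;                       /* -ENOMEM in the kernel */
	m->fsc  = f;                             /* mdsc->fsc = fsc  */
	f->mdsc = m;                             /* fsc->mdsc = mdsc */
	return 0;
}

static void mdsc_destroy(struct fsc *f)
{
	/* the kernel runs ceph_mdsc_stop(mdsc) before freeing */
	free(f->mdsc);
	f->mdsc = NULL;
}

Making ceph_mdsc_stop() static and exporting only ceph_mdsc_destroy() keeps the stop/free ordering in one place, so callers cannot free a still-running mds client.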
diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h
index c98267ce6d2a..d66d63c72355 100644
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -8,9 +8,9 @@
8#include <linux/rbtree.h> 8#include <linux/rbtree.h>
9#include <linux/spinlock.h> 9#include <linux/spinlock.h>
10 10
11#include "types.h" 11#include <linux/ceph/types.h>
12#include "messenger.h" 12#include <linux/ceph/messenger.h>
13#include "mdsmap.h" 13#include <linux/ceph/mdsmap.h>
14 14
15/* 15/*
16 * Some lock dependencies: 16 * Some lock dependencies:
@@ -26,7 +26,7 @@
26 * 26 *
27 */ 27 */
28 28
29struct ceph_client; 29struct ceph_fs_client;
30struct ceph_cap; 30struct ceph_cap;
31 31
32/* 32/*
@@ -230,7 +230,7 @@ struct ceph_mds_request {
230 * mds client state 230 * mds client state
231 */ 231 */
232struct ceph_mds_client { 232struct ceph_mds_client {
233 struct ceph_client *client; 233 struct ceph_fs_client *fsc;
234 struct mutex mutex; /* all nested structures */ 234 struct mutex mutex; /* all nested structures */
235 235
236 struct ceph_mdsmap *mdsmap; 236 struct ceph_mdsmap *mdsmap;
@@ -289,11 +289,6 @@ struct ceph_mds_client {
289 int caps_avail_count; /* unused, unreserved */ 289 int caps_avail_count; /* unused, unreserved */
290 int caps_min_count; /* keep at least this many 290 int caps_min_count; /* keep at least this many
291 (unreserved) */ 291 (unreserved) */
292
293#ifdef CONFIG_DEBUG_FS
294 struct dentry *debugfs_file;
295#endif
296
297 spinlock_t dentry_lru_lock; 292 spinlock_t dentry_lru_lock;
298 struct list_head dentry_lru; 293 struct list_head dentry_lru;
299 int num_dentry; 294 int num_dentry;
@@ -316,10 +311,9 @@ extern void ceph_put_mds_session(struct ceph_mds_session *s);
316extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc, 311extern int ceph_send_msg_mds(struct ceph_mds_client *mdsc,
317 struct ceph_msg *msg, int mds); 312 struct ceph_msg *msg, int mds);
318 313
319extern int ceph_mdsc_init(struct ceph_mds_client *mdsc, 314extern int ceph_mdsc_init(struct ceph_fs_client *fsc);
320 struct ceph_client *client);
321extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc); 315extern void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc);
322extern void ceph_mdsc_stop(struct ceph_mds_client *mdsc); 316extern void ceph_mdsc_destroy(struct ceph_fs_client *fsc);
323 317
324extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc); 318extern void ceph_mdsc_sync(struct ceph_mds_client *mdsc);
325 319
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 040be6d1150b..73b7d44e8a35 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -1,4 +1,4 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/bug.h> 3#include <linux/bug.h>
4#include <linux/err.h> 4#include <linux/err.h>
@@ -6,9 +6,9 @@
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/types.h> 7#include <linux/types.h>
8 8
9#include "mdsmap.h" 9#include <linux/ceph/mdsmap.h>
10#include "messenger.h" 10#include <linux/ceph/messenger.h>
11#include "decode.h" 11#include <linux/ceph/decode.h>
12 12
13#include "super.h" 13#include "super.h"
14 14
@@ -117,7 +117,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end)
117 } 117 }
118 118
119 dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n", 119 dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s\n",
120 i+1, n, global_id, mds, inc, pr_addr(&addr.in_addr), 120 i+1, n, global_id, mds, inc,
121 ceph_pr_addr(&addr.in_addr),
121 ceph_mds_state_name(state)); 122 ceph_mds_state_name(state));
122 if (mds >= 0 && mds < m->m_max_mds && state > 0) { 123 if (mds >= 0 && mds < m->m_max_mds && state > 0) {
123 m->m_info[mds].global_id = global_id; 124 m->m_info[mds].global_id = global_id;
diff --git a/fs/ceph/mdsmap.h b/fs/ceph/mdsmap.h
deleted file mode 100644
index 4c5cb0880bba..000000000000
--- a/fs/ceph/mdsmap.h
+++ /dev/null
@@ -1,62 +0,0 @@
1#ifndef _FS_CEPH_MDSMAP_H
2#define _FS_CEPH_MDSMAP_H
3
4#include "types.h"
5
6/*
7 * mds map - describe servers in the mds cluster.
8 *
9 * we limit fields to those the client actually cares about
10 */
11struct ceph_mds_info {
12 u64 global_id;
13 struct ceph_entity_addr addr;
14 s32 state;
15 int num_export_targets;
16 bool laggy;
17 u32 *export_targets;
18};
19
20struct ceph_mdsmap {
21 u32 m_epoch, m_client_epoch, m_last_failure;
22 u32 m_root;
23 u32 m_session_timeout; /* seconds */
24 u32 m_session_autoclose; /* seconds */
25 u64 m_max_file_size;
26 u32 m_max_mds; /* size of m_addr, m_state arrays */
27 struct ceph_mds_info *m_info;
28
29 /* which object pools file data can be stored in */
30 int m_num_data_pg_pools;
31 u32 *m_data_pg_pools;
32 u32 m_cas_pg_pool;
33};
34
35static inline struct ceph_entity_addr *
36ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
37{
38 if (w >= m->m_max_mds)
39 return NULL;
40 return &m->m_info[w].addr;
41}
42
43static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
44{
45 BUG_ON(w < 0);
46 if (w >= m->m_max_mds)
47 return CEPH_MDS_STATE_DNE;
48 return m->m_info[w].state;
49}
50
51static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
52{
53 if (w >= 0 && w < m->m_max_mds)
54 return m->m_info[w].laggy;
55 return false;
56}
57
58extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
59extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end);
60extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
61
62#endif
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
deleted file mode 100644
index 17a09b32a591..000000000000
--- a/fs/ceph/messenger.c
+++ /dev/null
@@ -1,2432 +0,0 @@
1#include "ceph_debug.h"
2
3#include <linux/crc32c.h>
4#include <linux/ctype.h>
5#include <linux/highmem.h>
6#include <linux/inet.h>
7#include <linux/kthread.h>
8#include <linux/net.h>
9#include <linux/slab.h>
10#include <linux/socket.h>
11#include <linux/string.h>
12#include <linux/bio.h>
13#include <linux/blkdev.h>
14#include <net/tcp.h>
15
16#include "super.h"
17#include "messenger.h"
18#include "decode.h"
19#include "pagelist.h"
20
21/*
22 * Ceph uses the messenger to exchange ceph_msg messages with other
23 * hosts in the system. The messenger provides ordered and reliable
24 * delivery. We tolerate TCP disconnects by reconnecting (with
25 * exponential backoff) in the case of a fault (disconnection, bad
26 * crc, protocol error). Acks allow sent messages to be discarded by
27 * the sender.
28 */
29
30/* static tag bytes (protocol control messages) */
31static char tag_msg = CEPH_MSGR_TAG_MSG;
32static char tag_ack = CEPH_MSGR_TAG_ACK;
33static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
34
35#ifdef CONFIG_LOCKDEP
36static struct lock_class_key socket_class;
37#endif
38
39
40static void queue_con(struct ceph_connection *con);
41static void con_work(struct work_struct *);
42static void ceph_fault(struct ceph_connection *con);
43
44/*
45 * nicely render a sockaddr as a string.
46 */
47#define MAX_ADDR_STR 20
48#define MAX_ADDR_STR_LEN 60
49static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
50static DEFINE_SPINLOCK(addr_str_lock);
51static int last_addr_str;
52
53const char *pr_addr(const struct sockaddr_storage *ss)
54{
55 int i;
56 char *s;
57 struct sockaddr_in *in4 = (void *)ss;
58 struct sockaddr_in6 *in6 = (void *)ss;
59
60 spin_lock(&addr_str_lock);
61 i = last_addr_str++;
62 if (last_addr_str == MAX_ADDR_STR)
63 last_addr_str = 0;
64 spin_unlock(&addr_str_lock);
65 s = addr_str[i];
66
67 switch (ss->ss_family) {
68 case AF_INET:
69 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
70 (unsigned int)ntohs(in4->sin_port));
71 break;
72
73 case AF_INET6:
74 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
75 (unsigned int)ntohs(in6->sin6_port));
76 break;
77
78 default:
79 sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
80 }
81
82 return s;
83}
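
pr_addr() above returns pointers into a small ring of static buffers, so the last MAX_ADDR_STR results stay valid at once -- useful when a single printk formats more than one address. A single-threaded sketch of the same rotating-buffer idiom (the kernel variant serializes the index with a spinlock; names here are made up):

#include <stdio.h>

#define NBUF   4
#define BUFLEN 32

static char bufs[NBUF][BUFLEN];
static unsigned next_buf;

/* format into the next ring slot; older results survive until
 * the ring wraps around, NBUF calls later */
static const char *fmt_port(unsigned port)
{
	char *s = bufs[next_buf++ % NBUF];
	snprintf(s, BUFLEN, ":%u", port);
	return s;
}

int main(void)
{
	/* both strings are live: they occupy different ring slots */
	printf("%s %s\n", fmt_port(6789), fmt_port(6800));
	return 0;
}

The trade-off is the same as in the kernel code: results are only borrowed, and a caller that holds one across more than NBUF further calls will see it overwritten.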
84
85static void encode_my_addr(struct ceph_messenger *msgr)
86{
87 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
88 ceph_encode_addr(&msgr->my_enc_addr);
89}
90
91/*
92 * work queue for all reading and writing to/from the socket.
93 */
94struct workqueue_struct *ceph_msgr_wq;
95
96int __init ceph_msgr_init(void)
97{
98 ceph_msgr_wq = create_workqueue("ceph-msgr");
99 if (IS_ERR(ceph_msgr_wq)) {
100 int ret = PTR_ERR(ceph_msgr_wq);
101 pr_err("msgr_init failed to create workqueue: %d\n", ret);
102 ceph_msgr_wq = NULL;
103 return ret;
104 }
105 return 0;
106}
107
108void ceph_msgr_exit(void)
109{
110 destroy_workqueue(ceph_msgr_wq);
111}
112
113void ceph_msgr_flush(void)
114{
115 flush_workqueue(ceph_msgr_wq);
116}
117
118
119/*
120 * socket callback functions
121 */
122
123/* data available on socket, or listen socket received a connect */
124static void ceph_data_ready(struct sock *sk, int count_unused)
125{
126 struct ceph_connection *con =
127 (struct ceph_connection *)sk->sk_user_data;
128 if (sk->sk_state != TCP_CLOSE_WAIT) {
129 dout("ceph_data_ready on %p state = %lu, queueing work\n",
130 con, con->state);
131 queue_con(con);
132 }
133}
134
135/* socket has buffer space for writing */
136static void ceph_write_space(struct sock *sk)
137{
138 struct ceph_connection *con =
139 (struct ceph_connection *)sk->sk_user_data;
140
141 /* only queue to workqueue if there is data we want to write. */
142 if (test_bit(WRITE_PENDING, &con->state)) {
143 dout("ceph_write_space %p queueing write work\n", con);
144 queue_con(con);
145 } else {
146 dout("ceph_write_space %p nothing to write\n", con);
147 }
148
149 /* since we have our own write_space, clear the SOCK_NOSPACE flag */
150 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
151}
152
153/* socket's state has changed */
154static void ceph_state_change(struct sock *sk)
155{
156 struct ceph_connection *con =
157 (struct ceph_connection *)sk->sk_user_data;
158
159 dout("ceph_state_change %p state = %lu sk_state = %u\n",
160 con, con->state, sk->sk_state);
161
162 if (test_bit(CLOSED, &con->state))
163 return;
164
165 switch (sk->sk_state) {
166 case TCP_CLOSE:
167 dout("ceph_state_change TCP_CLOSE\n");
168 case TCP_CLOSE_WAIT:
169 dout("ceph_state_change TCP_CLOSE_WAIT\n");
170 if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
171 if (test_bit(CONNECTING, &con->state))
172 con->error_msg = "connection failed";
173 else
174 con->error_msg = "socket closed";
175 queue_con(con);
176 }
177 break;
178 case TCP_ESTABLISHED:
179 dout("ceph_state_change TCP_ESTABLISHED\n");
180 queue_con(con);
181 break;
182 }
183}
184
185/*
186 * set up socket callbacks
187 */
188static void set_sock_callbacks(struct socket *sock,
189 struct ceph_connection *con)
190{
191 struct sock *sk = sock->sk;
192 sk->sk_user_data = (void *)con;
193 sk->sk_data_ready = ceph_data_ready;
194 sk->sk_write_space = ceph_write_space;
195 sk->sk_state_change = ceph_state_change;
196}
197
198
199/*
200 * socket helpers
201 */
202
203/*
204 * initiate connection to a remote socket.
205 */
206static struct socket *ceph_tcp_connect(struct ceph_connection *con)
207{
208 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
209 struct socket *sock;
210 int ret;
211
212 BUG_ON(con->sock);
213 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
214 IPPROTO_TCP, &sock);
215 if (ret)
216 return ERR_PTR(ret);
217 con->sock = sock;
218 sock->sk->sk_allocation = GFP_NOFS;
219
220#ifdef CONFIG_LOCKDEP
221 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
222#endif
223
224 set_sock_callbacks(sock, con);
225
226 dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));
227
228 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
229 O_NONBLOCK);
230 if (ret == -EINPROGRESS) {
231 dout("connect %s EINPROGRESS sk_state = %u\n",
232 pr_addr(&con->peer_addr.in_addr),
233 sock->sk->sk_state);
234 ret = 0;
235 }
236 if (ret < 0) {
237 pr_err("connect %s error %d\n",
238 pr_addr(&con->peer_addr.in_addr), ret);
239 sock_release(sock);
240 con->sock = NULL;
241 con->error_msg = "connect error";
242 }
243
244 if (ret < 0)
245 return ERR_PTR(ret);
246 return sock;
247}
248
249static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
250{
251 struct kvec iov = {buf, len};
252 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
253
254 return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
255}
256
257/*
258 * write something. @more is true if caller will be sending more data
259 * shortly.
260 */
261static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
262 size_t kvlen, size_t len, int more)
263{
264 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
265
266 if (more)
267 msg.msg_flags |= MSG_MORE;
268 else
269 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
270
271 return kernel_sendmsg(sock, &msg, iov, kvlen, len);
272}
273
274
275/*
276 * Shutdown/close the socket for the given connection.
277 */
278static int con_close_socket(struct ceph_connection *con)
279{
280 int rc;
281
282 dout("con_close_socket on %p sock %p\n", con, con->sock);
283 if (!con->sock)
284 return 0;
285 set_bit(SOCK_CLOSED, &con->state);
286 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
287 sock_release(con->sock);
288 con->sock = NULL;
289 clear_bit(SOCK_CLOSED, &con->state);
290 return rc;
291}
292
293/*
294 * Reset a connection. Discard all incoming and outgoing messages
295 * and clear *_seq state.
296 */
297static void ceph_msg_remove(struct ceph_msg *msg)
298{
299 list_del_init(&msg->list_head);
300 ceph_msg_put(msg);
301}
302static void ceph_msg_remove_list(struct list_head *head)
303{
304 while (!list_empty(head)) {
305 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
306 list_head);
307 ceph_msg_remove(msg);
308 }
309}
310
311static void reset_connection(struct ceph_connection *con)
312{
313 /* reset connection, out_queue, msg_ and connect_seq */
314 /* discard existing out_queue and msg_seq */
315 ceph_msg_remove_list(&con->out_queue);
316 ceph_msg_remove_list(&con->out_sent);
317
318 if (con->in_msg) {
319 ceph_msg_put(con->in_msg);
320 con->in_msg = NULL;
321 }
322
323 con->connect_seq = 0;
324 con->out_seq = 0;
325 if (con->out_msg) {
326 ceph_msg_put(con->out_msg);
327 con->out_msg = NULL;
328 }
329 con->out_keepalive_pending = false;
330 con->in_seq = 0;
331 con->in_seq_acked = 0;
332}
333
334/*
335 * mark a peer down. drop any open connections.
336 */
337void ceph_con_close(struct ceph_connection *con)
338{
339 dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
340 set_bit(CLOSED, &con->state); /* in case there's queued work */
341 clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
342 clear_bit(LOSSYTX, &con->state); /* so we retry next connect */
343 clear_bit(KEEPALIVE_PENDING, &con->state);
344 clear_bit(WRITE_PENDING, &con->state);
345 mutex_lock(&con->mutex);
346 reset_connection(con);
347 con->peer_global_seq = 0;
348 cancel_delayed_work(&con->work);
349 mutex_unlock(&con->mutex);
350 queue_con(con);
351}
352
353/*
354 * Reopen a closed connection, with a new peer address.
355 */
356void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
357{
358 dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
359 set_bit(OPENING, &con->state);
360 clear_bit(CLOSED, &con->state);
361 memcpy(&con->peer_addr, addr, sizeof(*addr));
362 con->delay = 0; /* reset backoff memory */
363 queue_con(con);
364}
365
366/*
367 * return true if this connection ever successfully opened
368 */
369bool ceph_con_opened(struct ceph_connection *con)
370{
371 return con->connect_seq > 0;
372}
373
374/*
375 * generic get/put
376 */
377struct ceph_connection *ceph_con_get(struct ceph_connection *con)
378{
379 dout("con_get %p nref = %d -> %d\n", con,
380 atomic_read(&con->nref), atomic_read(&con->nref) + 1);
381 if (atomic_inc_not_zero(&con->nref))
382 return con;
383 return NULL;
384}
385
386void ceph_con_put(struct ceph_connection *con)
387{
388 dout("con_put %p nref = %d -> %d\n", con,
389 atomic_read(&con->nref), atomic_read(&con->nref) - 1);
390 BUG_ON(atomic_read(&con->nref) == 0);
391 if (atomic_dec_and_test(&con->nref)) {
392 BUG_ON(con->sock);
393 kfree(con);
394 }
395}
396
397/*
398 * initialize a new connection.
399 */
400void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
401{
402 dout("con_init %p\n", con);
403 memset(con, 0, sizeof(*con));
404 atomic_set(&con->nref, 1);
405 con->msgr = msgr;
406 mutex_init(&con->mutex);
407 INIT_LIST_HEAD(&con->out_queue);
408 INIT_LIST_HEAD(&con->out_sent);
409 INIT_DELAYED_WORK(&con->work, con_work);
410}
411
412
413/*
414 * We maintain a global counter to order connection attempts. Get
415 * a unique seq greater than @gt.
416 */
417static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
418{
419 u32 ret;
420
421 spin_lock(&msgr->global_seq_lock);
422 if (msgr->global_seq < gt)
423 msgr->global_seq = gt;
424 ret = ++msgr->global_seq;
425 spin_unlock(&msgr->global_seq_lock);
426 return ret;
427}
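
get_global_seq() returns a value strictly greater than both the messenger's current counter and the caller-supplied floor @gt, so a reconnect can always present a connect_seq newer than anything the peer has seen. A tiny single-threaded sketch of that invariant (the kernel version holds global_seq_lock):

#include <assert.h>

static unsigned global_seq;

static unsigned get_seq(unsigned gt)
{
	if (global_seq < gt)
		global_seq = gt;   /* jump past the floor first */
	return ++global_seq;       /* then always move forward */
}

int main(void)
{
	unsigned a = get_seq(0);   /* 1 */
	unsigned b = get_seq(10);  /* 11: leapfrogs the floor */
	unsigned c = get_seq(0);   /* 12: still monotonic */
	assert(a == 1 && b == 11 && c == 12);
	return 0;
}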
428
429
430/*
431 * Prepare footer for currently outgoing message, and finish things
432 * off. Assumes out_kvec* are already valid.. we just add on to the end.
433 */
434static void prepare_write_message_footer(struct ceph_connection *con, int v)
435{
436 struct ceph_msg *m = con->out_msg;
437
438 dout("prepare_write_message_footer %p\n", con);
439 con->out_kvec_is_msg = true;
440 con->out_kvec[v].iov_base = &m->footer;
441 con->out_kvec[v].iov_len = sizeof(m->footer);
442 con->out_kvec_bytes += sizeof(m->footer);
443 con->out_kvec_left++;
444 con->out_more = m->more_to_follow;
445 con->out_msg_done = true;
446}
447
448/*
449 * Prepare headers for the next outgoing message.
450 */
451static void prepare_write_message(struct ceph_connection *con)
452{
453 struct ceph_msg *m;
454 int v = 0;
455
456 con->out_kvec_bytes = 0;
457 con->out_kvec_is_msg = true;
458 con->out_msg_done = false;
459
460 /* Sneak an ack in there first? If we can get it into the same
461 * TCP packet that's a good thing. */
462 if (con->in_seq > con->in_seq_acked) {
463 con->in_seq_acked = con->in_seq;
464 con->out_kvec[v].iov_base = &tag_ack;
465 con->out_kvec[v++].iov_len = 1;
466 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
467 con->out_kvec[v].iov_base = &con->out_temp_ack;
468 con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
469 con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
470 }
471
472 m = list_first_entry(&con->out_queue,
473 struct ceph_msg, list_head);
474 con->out_msg = m;
475 if (test_bit(LOSSYTX, &con->state)) {
476 list_del_init(&m->list_head);
477 } else {
478 /* put message on sent list */
479 ceph_msg_get(m);
480 list_move_tail(&m->list_head, &con->out_sent);
481 }
482
483 /*
484 * only assign outgoing seq # if we haven't sent this message
485 * yet. if it is requeued, resend with its original seq.
486 */
487 if (m->needs_out_seq) {
488 m->hdr.seq = cpu_to_le64(++con->out_seq);
489 m->needs_out_seq = false;
490 }
491
492 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
493 m, con->out_seq, le16_to_cpu(m->hdr.type),
494 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
495 le32_to_cpu(m->hdr.data_len),
496 m->nr_pages);
497 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
498
499 /* tag + hdr + front + middle */
500 con->out_kvec[v].iov_base = &tag_msg;
501 con->out_kvec[v++].iov_len = 1;
502 con->out_kvec[v].iov_base = &m->hdr;
503 con->out_kvec[v++].iov_len = sizeof(m->hdr);
504 con->out_kvec[v++] = m->front;
505 if (m->middle)
506 con->out_kvec[v++] = m->middle->vec;
507 con->out_kvec_left = v;
508 con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
509 (m->middle ? m->middle->vec.iov_len : 0);
510 con->out_kvec_cur = con->out_kvec;
511
512 /* fill in crc (except data pages), footer */
513 con->out_msg->hdr.crc =
514 cpu_to_le32(crc32c(0, (void *)&m->hdr,
515 sizeof(m->hdr) - sizeof(m->hdr.crc)));
516 con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
517 con->out_msg->footer.front_crc =
518 cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
519 if (m->middle)
520 con->out_msg->footer.middle_crc =
521 cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
522 m->middle->vec.iov_len));
523 else
524 con->out_msg->footer.middle_crc = 0;
525 con->out_msg->footer.data_crc = 0;
526 dout("prepare_write_message front_crc %u data_crc %u\n",
527 le32_to_cpu(con->out_msg->footer.front_crc),
528 le32_to_cpu(con->out_msg->footer.middle_crc));
529
530 /* is there a data payload? */
531 if (le32_to_cpu(m->hdr.data_len) > 0) {
532 /* initialize page iterator */
533 con->out_msg_pos.page = 0;
534 if (m->pages)
535 con->out_msg_pos.page_pos =
536 le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
537 else
538 con->out_msg_pos.page_pos = 0;
539 con->out_msg_pos.data_pos = 0;
540 con->out_msg_pos.did_page_crc = 0;
541 con->out_more = 1; /* data + footer will follow */
542 } else {
543 /* no, queue up footer too and be done */
544 prepare_write_message_footer(con, v);
545 }
546
547 set_bit(WRITE_PENDING, &con->state);
548}
549
550/*
551 * Prepare an ack.
552 */
553static void prepare_write_ack(struct ceph_connection *con)
554{
555 dout("prepare_write_ack %p %llu -> %llu\n", con,
556 con->in_seq_acked, con->in_seq);
557 con->in_seq_acked = con->in_seq;
558
559 con->out_kvec[0].iov_base = &tag_ack;
560 con->out_kvec[0].iov_len = 1;
561 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
562 con->out_kvec[1].iov_base = &con->out_temp_ack;
563 con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
564 con->out_kvec_left = 2;
565 con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
566 con->out_kvec_cur = con->out_kvec;
567 con->out_more = 1; /* more will follow.. eventually.. */
568 set_bit(WRITE_PENDING, &con->state);
569}
570
571/*
572 * Prepare to write keepalive byte.
573 */
574static void prepare_write_keepalive(struct ceph_connection *con)
575{
576 dout("prepare_write_keepalive %p\n", con);
577 con->out_kvec[0].iov_base = &tag_keepalive;
578 con->out_kvec[0].iov_len = 1;
579 con->out_kvec_left = 1;
580 con->out_kvec_bytes = 1;
581 con->out_kvec_cur = con->out_kvec;
582 set_bit(WRITE_PENDING, &con->state);
583}
584
585/*
586 * Connection negotiation.
587 */
588
589static void prepare_connect_authorizer(struct ceph_connection *con)
590{
591 void *auth_buf;
592 int auth_len = 0;
593 int auth_protocol = 0;
594
595 mutex_unlock(&con->mutex);
596 if (con->ops->get_authorizer)
597 con->ops->get_authorizer(con, &auth_buf, &auth_len,
598 &auth_protocol, &con->auth_reply_buf,
599 &con->auth_reply_buf_len,
600 con->auth_retry);
601 mutex_lock(&con->mutex);
602
603 con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
604 con->out_connect.authorizer_len = cpu_to_le32(auth_len);
605
606 con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
607 con->out_kvec[con->out_kvec_left].iov_len = auth_len;
608 con->out_kvec_left++;
609 con->out_kvec_bytes += auth_len;
610}
611
612/*
613 * We connected to a peer and are saying hello.
614 */
615static void prepare_write_banner(struct ceph_messenger *msgr,
616 struct ceph_connection *con)
617{
618 int len = strlen(CEPH_BANNER);
619
620 con->out_kvec[0].iov_base = CEPH_BANNER;
621 con->out_kvec[0].iov_len = len;
622 con->out_kvec[1].iov_base = &msgr->my_enc_addr;
623 con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
624 con->out_kvec_left = 2;
625 con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
626 con->out_kvec_cur = con->out_kvec;
627 con->out_more = 0;
628 set_bit(WRITE_PENDING, &con->state);
629}
630
631static void prepare_write_connect(struct ceph_messenger *msgr,
632 struct ceph_connection *con,
633 int after_banner)
634{
635 unsigned global_seq = get_global_seq(con->msgr, 0);
636 int proto;
637
638 switch (con->peer_name.type) {
639 case CEPH_ENTITY_TYPE_MON:
640 proto = CEPH_MONC_PROTOCOL;
641 break;
642 case CEPH_ENTITY_TYPE_OSD:
643 proto = CEPH_OSDC_PROTOCOL;
644 break;
645 case CEPH_ENTITY_TYPE_MDS:
646 proto = CEPH_MDSC_PROTOCOL;
647 break;
648 default:
649 BUG();
650 }
651
652 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
653 con->connect_seq, global_seq, proto);
654
655 con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED);
656 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
657 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
658 con->out_connect.global_seq = cpu_to_le32(global_seq);
659 con->out_connect.protocol_version = cpu_to_le32(proto);
660 con->out_connect.flags = 0;
661
662 if (!after_banner) {
663 con->out_kvec_left = 0;
664 con->out_kvec_bytes = 0;
665 }
666 con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
667 con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
668 con->out_kvec_left++;
669 con->out_kvec_bytes += sizeof(con->out_connect);
670 con->out_kvec_cur = con->out_kvec;
671 con->out_more = 0;
672 set_bit(WRITE_PENDING, &con->state);
673
674 prepare_connect_authorizer(con);
675}
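
For reference, on the initial banner path (prepare_write_banner() followed by prepare_write_connect() with after_banner set), the queued out_kvec[] entries describe the whole greeting in wire order; a sketch of the layout:

	/*
	 * out_kvec[0]: CEPH_BANNER string
	 * out_kvec[1]: msgr->my_enc_addr (our encoded entity address)
	 * out_kvec[2]: con->out_connect (features, host type, seqs, proto)
	 * out_kvec[3]: authorizer buffer, if get_authorizer supplied one
	 */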
676
677
678/*
679 * Write as much of the pending kvec data to the socket as we can.
680 * 1 -> done
681 * 0 -> socket full, but more to do
682 * <0 -> error
683 */
684static int write_partial_kvec(struct ceph_connection *con)
685{
686 int ret;
687
688 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
689 while (con->out_kvec_bytes > 0) {
690 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
691 con->out_kvec_left, con->out_kvec_bytes,
692 con->out_more);
693 if (ret <= 0)
694 goto out;
695 con->out_kvec_bytes -= ret;
696 if (con->out_kvec_bytes == 0)
697 break; /* done */
698 while (ret > 0) {
699 if (ret >= con->out_kvec_cur->iov_len) {
700 ret -= con->out_kvec_cur->iov_len;
701 con->out_kvec_cur++;
702 con->out_kvec_left--;
703 } else {
704 con->out_kvec_cur->iov_len -= ret;
705 con->out_kvec_cur->iov_base += ret;
706 ret = 0;
707 break;
708 }
709 }
710 }
711 con->out_kvec_left = 0;
712 con->out_kvec_is_msg = false;
713 ret = 1;
714out:
715 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
716 con->out_kvec_bytes, con->out_kvec_left, ret);
717 return ret; /* done! */
718}
719
720#ifdef CONFIG_BLOCK
721static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
722{
723 if (!bio) {
724 *iter = NULL;
725 *seg = 0;
726 return;
727 }
728 *iter = bio;
729 *seg = bio->bi_idx;
730}
731
732static void iter_bio_next(struct bio **bio_iter, int *seg)
733{
734 if (*bio_iter == NULL)
735 return;
736
737 BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
738
739 (*seg)++;
740 if (*seg == (*bio_iter)->bi_vcnt)
741 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
742}
743#endif
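
A minimal sketch of how these two helpers chain together to walk every segment of a bio chain (variable names are illustrative; bio_iovec_idx() is the contemporary block-layer accessor already used elsewhere in this file):

	struct bio *iter;
	int seg;

	init_bio_iter(bio, &iter, &seg);
	while (iter) {
		struct bio_vec *bv = bio_iovec_idx(iter, seg);
		/* consume bv->bv_len bytes at bv->bv_page + bv->bv_offset */
		iter_bio_next(&iter, &seg);
	}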
744
745/*
746 * Write as much message data payload as we can. If we finish, queue
747 * up the footer.
748 * 1 -> done, footer is now queued in out_kvec[].
749 * 0 -> socket full, but more to do
750 * <0 -> error
751 */
752static int write_partial_msg_pages(struct ceph_connection *con)
753{
754 struct ceph_msg *msg = con->out_msg;
755 unsigned data_len = le32_to_cpu(msg->hdr.data_len);
756 size_t len;
757 int crc = con->msgr->nocrc;
758 int ret;
759 int total_max_write;
760 int in_trail = 0;
761 size_t trail_len = (msg->trail ? msg->trail->length : 0);
762
763 dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
764 con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
765 con->out_msg_pos.page_pos);
766
767#ifdef CONFIG_BLOCK
768 if (msg->bio && !msg->bio_iter)
769 init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
770#endif
771
772 while (data_len > con->out_msg_pos.data_pos) {
773 struct page *page = NULL;
774 void *kaddr = NULL;
775 int max_write = PAGE_SIZE;
776 int page_shift = 0;
777
778 total_max_write = data_len - trail_len -
779 con->out_msg_pos.data_pos;
780
781 /*
782 * If we are calculating the data crc (the default), we need
783 * to map the page.  If our pages[] has been revoked, use the
784 * zero page.
785 */
786
787 /* have we reached the trail part of the data? */
788 if (con->out_msg_pos.data_pos >= data_len - trail_len) {
789 in_trail = 1;
790
791 total_max_write = data_len - con->out_msg_pos.data_pos;
792
793 page = list_first_entry(&msg->trail->head,
794 struct page, lru);
795 if (crc)
796 kaddr = kmap(page);
797 max_write = PAGE_SIZE;
798 } else if (msg->pages) {
799 page = msg->pages[con->out_msg_pos.page];
800 if (crc)
801 kaddr = kmap(page);
802 } else if (msg->pagelist) {
803 page = list_first_entry(&msg->pagelist->head,
804 struct page, lru);
805 if (crc)
806 kaddr = kmap(page);
807#ifdef CONFIG_BLOCK
808 } else if (msg->bio) {
809 struct bio_vec *bv;
810
811 bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
812 page = bv->bv_page;
813 page_shift = bv->bv_offset;
814 if (crc)
815 kaddr = kmap(page) + page_shift;
816 max_write = bv->bv_len;
817#endif
818 } else {
819 page = con->msgr->zero_page;
820 if (crc)
821 kaddr = page_address(con->msgr->zero_page);
822 }
823 len = min_t(int, max_write - con->out_msg_pos.page_pos,
824 total_max_write);
825
826 if (crc && !con->out_msg_pos.did_page_crc) {
827 void *base = kaddr + con->out_msg_pos.page_pos;
828 u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
829
830 BUG_ON(kaddr == NULL);
831 con->out_msg->footer.data_crc =
832 cpu_to_le32(crc32c(tmpcrc, base, len));
833 con->out_msg_pos.did_page_crc = 1;
834 }
835 ret = kernel_sendpage(con->sock, page,
836 con->out_msg_pos.page_pos + page_shift,
837 len,
838 MSG_DONTWAIT | MSG_NOSIGNAL |
839 MSG_MORE);
840
841 if (crc &&
842 (msg->pages || msg->pagelist || msg->bio || in_trail))
843 kunmap(page);
844
845 if (ret <= 0)
846 goto out;
847
848 con->out_msg_pos.data_pos += ret;
849 con->out_msg_pos.page_pos += ret;
850 if (ret == len) {
851 con->out_msg_pos.page_pos = 0;
852 con->out_msg_pos.page++;
853 con->out_msg_pos.did_page_crc = 0;
854 if (in_trail)
855 list_move_tail(&page->lru,
856 &msg->trail->head);
857 else if (msg->pagelist)
858 list_move_tail(&page->lru,
859 &msg->pagelist->head);
860#ifdef CONFIG_BLOCK
861 else if (msg->bio)
862 iter_bio_next(&msg->bio_iter, &msg->bio_seg);
863#endif
864 }
865 }
866
867 dout("write_partial_msg_pages %p msg %p done\n", con, msg);
868
869 /* prepare and queue up footer, too */
870 if (!crc)
871 con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
872 con->out_kvec_bytes = 0;
873 con->out_kvec_left = 0;
874 con->out_kvec_cur = con->out_kvec;
875 prepare_write_message_footer(con, 0);
876 ret = 1;
877out:
878 return ret;
879}
880
881/*
882 * write some zeros
883 */
884static int write_partial_skip(struct ceph_connection *con)
885{
886 int ret;
887
888 while (con->out_skip > 0) {
889 struct kvec iov = {
890 .iov_base = page_address(con->msgr->zero_page),
891 .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
892 };
893
894 ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
895 if (ret <= 0)
896 goto out;
897 con->out_skip -= ret;
898 }
899 ret = 1;
900out:
901 return ret;
902}
903
904/*
905 * Prepare to read connection handshake, or an ack.
906 */
907static void prepare_read_banner(struct ceph_connection *con)
908{
909 dout("prepare_read_banner %p\n", con);
910 con->in_base_pos = 0;
911}
912
913static void prepare_read_connect(struct ceph_connection *con)
914{
915 dout("prepare_read_connect %p\n", con);
916 con->in_base_pos = 0;
917}
918
919static void prepare_read_ack(struct ceph_connection *con)
920{
921 dout("prepare_read_ack %p\n", con);
922 con->in_base_pos = 0;
923}
924
925static void prepare_read_tag(struct ceph_connection *con)
926{
927 dout("prepare_read_tag %p\n", con);
928 con->in_base_pos = 0;
929 con->in_tag = CEPH_MSGR_TAG_READY;
930}
931
932/*
933 * Prepare to read a message.
934 */
935static int prepare_read_message(struct ceph_connection *con)
936{
937 dout("prepare_read_message %p\n", con);
938 BUG_ON(con->in_msg != NULL);
939 con->in_base_pos = 0;
940 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
941 return 0;
942}
943
944
945static int read_partial(struct ceph_connection *con,
946 int *to, int size, void *object)
947{
948 *to += size;
949 while (con->in_base_pos < *to) {
950 int left = *to - con->in_base_pos;
951 int have = size - left;
952 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
953 if (ret <= 0)
954 return ret;
955 con->in_base_pos += ret;
956 }
957 return 1;
958}
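
The bookkeeping here is cumulative: *to is a running end offset carried across successive calls, while con->in_base_pos tracks how much has actually been received. A worked example (sizes illustrative):

	/*
	 * int to = 0;
	 * read_partial(con, &to, 9, banner);     to becomes 9
	 * read_partial(con, &to, 136, &addr);    to becomes 145
	 *
	 * If the socket drains mid-banner with in_base_pos == 5, both
	 * calls return early; re-running the same sequence later resumes
	 * the banner read at offset 5 without re-reading anything.
	 */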
959
960
961/*
962 * Read all or part of the connect-side handshake on a new connection
963 */
964static int read_partial_banner(struct ceph_connection *con)
965{
966 int ret, to = 0;
967
968 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
969
970 /* peer's banner */
971 ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
972 if (ret <= 0)
973 goto out;
974 ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
975 &con->actual_peer_addr);
976 if (ret <= 0)
977 goto out;
978 ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
979 &con->peer_addr_for_me);
980 if (ret <= 0)
981 goto out;
982out:
983 return ret;
984}
985
986static int read_partial_connect(struct ceph_connection *con)
987{
988 int ret, to = 0;
989
990 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
991
992 ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
993 if (ret <= 0)
994 goto out;
995 ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
996 con->auth_reply_buf);
997 if (ret <= 0)
998 goto out;
999
1000 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1001 con, (int)con->in_reply.tag,
1002 le32_to_cpu(con->in_reply.connect_seq),
1003 le32_to_cpu(con->in_reply.global_seq));
1004out:
1005 return ret;
1006
1007}
1008
1009/*
1010 * Verify the hello banner looks okay.
1011 */
1012static int verify_hello(struct ceph_connection *con)
1013{
1014 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1015 pr_err("connect to %s got bad banner\n",
1016 pr_addr(&con->peer_addr.in_addr));
1017 con->error_msg = "protocol error, bad banner";
1018 return -1;
1019 }
1020 return 0;
1021}
1022
1023static bool addr_is_blank(struct sockaddr_storage *ss)
1024{
1025 switch (ss->ss_family) {
1026 case AF_INET:
1027 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1028 case AF_INET6:
1029 return
1030 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1031 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1032 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1033 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1034 }
1035 return false;
1036}
1037
1038static int addr_port(struct sockaddr_storage *ss)
1039{
1040 switch (ss->ss_family) {
1041 case AF_INET:
1042 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1043 case AF_INET6:
1044 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1045 }
1046 return 0;
1047}
1048
1049static void addr_set_port(struct sockaddr_storage *ss, int p)
1050{
1051 switch (ss->ss_family) {
1052 case AF_INET:
1053 ((struct sockaddr_in *)ss)->sin_port = htons(p);
	break;
1054 case AF_INET6:
1055 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1056 }
1057}
1058
1059/*
1060 * Parse an ip[:port] list into an addr array. Use the default
1061 * monitor port if a port isn't specified.
1062 */
1063int ceph_parse_ips(const char *c, const char *end,
1064 struct ceph_entity_addr *addr,
1065 int max_count, int *count)
1066{
1067 int i;
1068 const char *p = c;
1069
1070 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1071 for (i = 0; i < max_count; i++) {
1072 const char *ipend;
1073 struct sockaddr_storage *ss = &addr[i].in_addr;
1074 struct sockaddr_in *in4 = (void *)ss;
1075 struct sockaddr_in6 *in6 = (void *)ss;
1076 int port;
1077 char delim = ',';
1078
1079 if (*p == '[') {
1080 delim = ']';
1081 p++;
1082 }
1083
1084 memset(ss, 0, sizeof(*ss));
1085 if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
1086 delim, &ipend))
1087 ss->ss_family = AF_INET;
1088 else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
1089 delim, &ipend))
1090 ss->ss_family = AF_INET6;
1091 else
1092 goto bad;
1093 p = ipend;
1094
1095 if (delim == ']') {
1096 if (*p != ']') {
1097 dout("missing matching ']'\n");
1098 goto bad;
1099 }
1100 p++;
1101 }
1102
1103 /* port? */
1104 if (p < end && *p == ':') {
1105 port = 0;
1106 p++;
1107 while (p < end && *p >= '0' && *p <= '9') {
1108 port = (port * 10) + (*p - '0');
1109 p++;
1110 }
1111 if (port > 65535 || port == 0)
1112 goto bad;
1113 } else {
1114 port = CEPH_MON_PORT;
1115 }
1116
1117 addr_set_port(ss, port);
1118
1119 dout("parse_ips got %s\n", pr_addr(ss));
1120
1121 if (p == end)
1122 break;
1123 if (*p != ',')
1124 goto bad;
1125 p++;
1126 }
1127
1128 if (p != end)
1129 goto bad;
1130
1131 if (count)
1132 *count = i + 1;
1133 return 0;
1134
1135bad:
1136 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1137 return -EINVAL;
1138}
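
A minimal usage sketch (the address strings and array size are illustrative); an entry without an explicit port falls back to CEPH_MON_PORT:

	struct ceph_entity_addr addr[2];
	int count;
	const char *s = "1.2.3.4,[::1]:6789";	/* first gets CEPH_MON_PORT */

	if (ceph_parse_ips(s, s + strlen(s), addr, 2, &count) == 0)
		dout("parsed %d addrs\n", count);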
1139
1140static int process_banner(struct ceph_connection *con)
1141{
1142 dout("process_banner on %p\n", con);
1143
1144 if (verify_hello(con) < 0)
1145 return -1;
1146
1147 ceph_decode_addr(&con->actual_peer_addr);
1148 ceph_decode_addr(&con->peer_addr_for_me);
1149
1150 /*
1151 * Make sure the other end is who we wanted.  Note that the other
1152 * end may not yet know their IP address, so if it's 0.0.0.0, give
1153 * them the benefit of the doubt.
1154 */
1155 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1156 sizeof(con->peer_addr)) != 0 &&
1157 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1158 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1159 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1160 pr_addr(&con->peer_addr.in_addr),
1161 (int)le32_to_cpu(con->peer_addr.nonce),
1162 pr_addr(&con->actual_peer_addr.in_addr),
1163 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1164 con->error_msg = "wrong peer at address";
1165 return -1;
1166 }
1167
1168 /*
1169 * did we learn our address?
1170 */
1171 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1172 int port = addr_port(&con->msgr->inst.addr.in_addr);
1173
1174 memcpy(&con->msgr->inst.addr.in_addr,
1175 &con->peer_addr_for_me.in_addr,
1176 sizeof(con->peer_addr_for_me.in_addr));
1177 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1178 encode_my_addr(con->msgr);
1179 dout("process_banner learned my addr is %s\n",
1180 pr_addr(&con->msgr->inst.addr.in_addr));
1181 }
1182
1183 set_bit(NEGOTIATING, &con->state);
1184 prepare_read_connect(con);
1185 return 0;
1186}
1187
1188static void fail_protocol(struct ceph_connection *con)
1189{
1190 reset_connection(con);
1191 set_bit(CLOSED, &con->state); /* in case there's queued work */
1192
1193 mutex_unlock(&con->mutex);
1194 if (con->ops->bad_proto)
1195 con->ops->bad_proto(con);
1196 mutex_lock(&con->mutex);
1197}
1198
1199static int process_connect(struct ceph_connection *con)
1200{
1201 u64 sup_feat = CEPH_FEATURE_SUPPORTED;
1202 u64 req_feat = CEPH_FEATURE_REQUIRED;
1203 u64 server_feat = le64_to_cpu(con->in_reply.features);
1204
1205 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1206
1207 switch (con->in_reply.tag) {
1208 case CEPH_MSGR_TAG_FEATURES:
1209 pr_err("%s%lld %s feature set mismatch,"
1210 " my %llx < server's %llx, missing %llx\n",
1211 ENTITY_NAME(con->peer_name),
1212 pr_addr(&con->peer_addr.in_addr),
1213 sup_feat, server_feat, server_feat & ~sup_feat);
1214 con->error_msg = "missing required protocol features";
1215 fail_protocol(con);
1216 return -1;
1217
1218 case CEPH_MSGR_TAG_BADPROTOVER:
1219 pr_err("%s%lld %s protocol version mismatch,"
1220 " my %d != server's %d\n",
1221 ENTITY_NAME(con->peer_name),
1222 pr_addr(&con->peer_addr.in_addr),
1223 le32_to_cpu(con->out_connect.protocol_version),
1224 le32_to_cpu(con->in_reply.protocol_version));
1225 con->error_msg = "protocol version mismatch";
1226 fail_protocol(con);
1227 return -1;
1228
1229 case CEPH_MSGR_TAG_BADAUTHORIZER:
1230 con->auth_retry++;
1231 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
1232 con->auth_retry);
1233 if (con->auth_retry == 2) {
1234 con->error_msg = "connect authorization failure";
1235 reset_connection(con);
1236 set_bit(CLOSED, &con->state);
1237 return -1;
1238 }
1239 con->auth_retry = 1;
1240 prepare_write_connect(con->msgr, con, 0);
1241 prepare_read_connect(con);
1242 break;
1243
1244 case CEPH_MSGR_TAG_RESETSESSION:
1245 /*
1246 * If we connected with a large connect_seq but the peer
1247 * has no record of a session with us (no connection, or
1248 * connect_seq == 0), they will send RESETSESSION to indicate
1249 * that they must have reset their session, and may have
1250 * dropped messages.
1251 */
1252 dout("process_connect got RESET peer seq %u\n",
1253 le32_to_cpu(con->in_connect.connect_seq));
1254 pr_err("%s%lld %s connection reset\n",
1255 ENTITY_NAME(con->peer_name),
1256 pr_addr(&con->peer_addr.in_addr));
1257 reset_connection(con);
1258 prepare_write_connect(con->msgr, con, 0);
1259 prepare_read_connect(con);
1260
1261 /* Tell ceph about it. */
1262 mutex_unlock(&con->mutex);
1263 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
1264 if (con->ops->peer_reset)
1265 con->ops->peer_reset(con);
1266 mutex_lock(&con->mutex);
1267 break;
1268
1269 case CEPH_MSGR_TAG_RETRY_SESSION:
1270 /*
1271 * If we sent a smaller connect_seq than the peer has, try
1272 * again with a larger value.
1273 */
1274 dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
1275 le32_to_cpu(con->out_connect.connect_seq),
1276 le32_to_cpu(con->in_connect.connect_seq));
1277 con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
1278 prepare_write_connect(con->msgr, con, 0);
1279 prepare_read_connect(con);
1280 break;
1281
1282 case CEPH_MSGR_TAG_RETRY_GLOBAL:
1283 /*
1284 * If we sent a smaller global_seq than the peer has, try
1285 * again with a larger value.
1286 */
1287 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
1288 con->peer_global_seq,
1289 le32_to_cpu(con->in_connect.global_seq));
1290 get_global_seq(con->msgr,
1291 le32_to_cpu(con->in_connect.global_seq));
1292 prepare_write_connect(con->msgr, con, 0);
1293 prepare_read_connect(con);
1294 break;
1295
1296 case CEPH_MSGR_TAG_READY:
1297 if (req_feat & ~server_feat) {
1298 pr_err("%s%lld %s protocol feature mismatch,"
1299 " my required %llx > server's %llx, need %llx\n",
1300 ENTITY_NAME(con->peer_name),
1301 pr_addr(&con->peer_addr.in_addr),
1302 req_feat, server_feat, req_feat & ~server_feat);
1303 con->error_msg = "missing required protocol features";
1304 fail_protocol(con);
1305 return -1;
1306 }
1307 clear_bit(CONNECTING, &con->state);
1308 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
1309 con->connect_seq++;
1310 con->peer_features = server_feat;
1311 dout("process_connect got READY gseq %d cseq %d (%d)\n",
1312 con->peer_global_seq,
1313 le32_to_cpu(con->in_reply.connect_seq),
1314 con->connect_seq);
1315 WARN_ON(con->connect_seq !=
1316 le32_to_cpu(con->in_reply.connect_seq));
1317
1318 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
1319 set_bit(LOSSYTX, &con->state);
1320
1321 prepare_read_tag(con);
1322 break;
1323
1324 case CEPH_MSGR_TAG_WAIT:
1325 /*
1326 * If there is a connection race (we are opening
1327 * connections to each other), one of us may just have
1328 * to WAIT. This shouldn't happen if we are the
1329 * client.
1330 */
1331 pr_err("process_connect peer connecting WAIT\n");
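		/* fall through */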
1332
1333 default:
1334 pr_err("connect protocol error, will retry\n");
1335 con->error_msg = "protocol error, garbage tag during connect";
1336 return -1;
1337 }
1338 return 0;
1339}
1340
1341
1342/*
1343 * read (part of) an ack
1344 */
1345static int read_partial_ack(struct ceph_connection *con)
1346{
1347 int to = 0;
1348
1349 return read_partial(con, &to, sizeof(con->in_temp_ack),
1350 &con->in_temp_ack);
1351}
1352
1353
1354/*
1355 * We can finally discard anything that's been acked.
1356 */
1357static void process_ack(struct ceph_connection *con)
1358{
1359 struct ceph_msg *m;
1360 u64 ack = le64_to_cpu(con->in_temp_ack);
1361 u64 seq;
1362
1363 while (!list_empty(&con->out_sent)) {
1364 m = list_first_entry(&con->out_sent, struct ceph_msg,
1365 list_head);
1366 seq = le64_to_cpu(m->hdr.seq);
1367 if (seq > ack)
1368 break;
1369 dout("got ack for seq %llu type %d at %p\n", seq,
1370 le16_to_cpu(m->hdr.type), m);
1371 ceph_msg_remove(m);
1372 }
1373 prepare_read_tag(con);
1374}
1375
1376
1377
1378
1379static int read_partial_message_section(struct ceph_connection *con,
1380 struct kvec *section,
1381 unsigned int sec_len, u32 *crc)
1382{
1383 int ret, left;
1384
1385 BUG_ON(!section);
1386
1387 while (section->iov_len < sec_len) {
1388 BUG_ON(section->iov_base == NULL);
1389 left = sec_len - section->iov_len;
1390 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
1391 section->iov_len, left);
1392 if (ret <= 0)
1393 return ret;
1394 section->iov_len += ret;
1395 if (section->iov_len == sec_len)
1396 *crc = crc32c(0, section->iov_base,
1397 section->iov_len);
1398 }
1399
1400 return 1;
1401}
1402
1403static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
1404 struct ceph_msg_header *hdr,
1405 int *skip);
1406
1407
1408static int read_partial_message_pages(struct ceph_connection *con,
1409 struct page **pages,
1410 unsigned data_len, int datacrc)
1411{
1412 void *p;
1413 int ret;
1414 int left;
1415
1416 left = min((int)(data_len - con->in_msg_pos.data_pos),
1417 (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
1418 /* (page) data */
1419 BUG_ON(pages == NULL);
1420 p = kmap(pages[con->in_msg_pos.page]);
1421 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1422 left);
1423 if (ret > 0 && datacrc)
1424 con->in_data_crc =
1425 crc32c(con->in_data_crc,
1426 p + con->in_msg_pos.page_pos, ret);
1427 kunmap(pages[con->in_msg_pos.page]);
1428 if (ret <= 0)
1429 return ret;
1430 con->in_msg_pos.data_pos += ret;
1431 con->in_msg_pos.page_pos += ret;
1432 if (con->in_msg_pos.page_pos == PAGE_SIZE) {
1433 con->in_msg_pos.page_pos = 0;
1434 con->in_msg_pos.page++;
1435 }
1436
1437 return ret;
1438}
1439
1440#ifdef CONFIG_BLOCK
1441static int read_partial_message_bio(struct ceph_connection *con,
1442 struct bio **bio_iter, int *bio_seg,
1443 unsigned data_len, int datacrc)
1444{
1445 struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
1446 void *p;
1447 int ret, left;
1448
1449 if (IS_ERR(bv))
1450 return PTR_ERR(bv);
1451
1452 left = min((int)(data_len - con->in_msg_pos.data_pos),
1453 (int)(bv->bv_len - con->in_msg_pos.page_pos));
1454
1455 p = kmap(bv->bv_page) + bv->bv_offset;
1456
1457 ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
1458 left);
1459 if (ret > 0 && datacrc)
1460 con->in_data_crc =
1461 crc32c(con->in_data_crc,
1462 p + con->in_msg_pos.page_pos, ret);
1463 kunmap(bv->bv_page);
1464 if (ret <= 0)
1465 return ret;
1466 con->in_msg_pos.data_pos += ret;
1467 con->in_msg_pos.page_pos += ret;
1468 if (con->in_msg_pos.page_pos == bv->bv_len) {
1469 con->in_msg_pos.page_pos = 0;
1470 iter_bio_next(bio_iter, bio_seg);
1471 }
1472
1473 return ret;
1474}
1475#endif
1476
1477/*
1478 * read (part of) a message.
1479 */
1480static int read_partial_message(struct ceph_connection *con)
1481{
1482 struct ceph_msg *m = con->in_msg;
1483 int ret;
1484 int to, left;
1485 unsigned front_len, middle_len, data_len, data_off;
1486 int datacrc = con->msgr->nocrc;
1487 int skip;
1488 u64 seq;
1489
1490 dout("read_partial_message con %p msg %p\n", con, m);
1491
1492 /* header */
1493 while (con->in_base_pos < sizeof(con->in_hdr)) {
1494 left = sizeof(con->in_hdr) - con->in_base_pos;
1495 ret = ceph_tcp_recvmsg(con->sock,
1496 (char *)&con->in_hdr + con->in_base_pos,
1497 left);
1498 if (ret <= 0)
1499 return ret;
1500 con->in_base_pos += ret;
1501 if (con->in_base_pos == sizeof(con->in_hdr)) {
1502 u32 crc = crc32c(0, (void *)&con->in_hdr,
1503 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
1504 if (crc != le32_to_cpu(con->in_hdr.crc)) {
1505 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
1506 crc, le32_to_cpu(con->in_hdr.crc));
1508 return -EBADMSG;
1509 }
1510 }
1511 }
1512 front_len = le32_to_cpu(con->in_hdr.front_len);
1513 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
1514 return -EIO;
1515 middle_len = le32_to_cpu(con->in_hdr.middle_len);
1516 if (middle_len > CEPH_MSG_MAX_DATA_LEN)
1517 return -EIO;
1518 data_len = le32_to_cpu(con->in_hdr.data_len);
1519 if (data_len > CEPH_MSG_MAX_DATA_LEN)
1520 return -EIO;
1521 data_off = le16_to_cpu(con->in_hdr.data_off);
1522
1523 /* verify seq# */
1524 seq = le64_to_cpu(con->in_hdr.seq);
1525 if ((s64)seq - (s64)con->in_seq < 1) {
1526 pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
1527 ENTITY_NAME(con->peer_name),
1528 pr_addr(&con->peer_addr.in_addr),
1529 seq, con->in_seq + 1);
1530 con->in_base_pos = -front_len - middle_len - data_len -
1531 sizeof(m->footer);
1532 con->in_tag = CEPH_MSGR_TAG_READY;
1533 con->in_seq++;
1534 return 0;
1535 } else if ((s64)seq - (s64)con->in_seq > 1) {
1536 pr_err("read_partial_message bad seq %lld expected %lld\n",
1537 seq, con->in_seq + 1);
1538 con->error_msg = "bad message sequence # for incoming message";
1539 return -EBADMSG;
1540 }
1541
1542 /* allocate message? */
1543 if (!con->in_msg) {
1544 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
1545 con->in_hdr.front_len, con->in_hdr.data_len);
1546 skip = 0;
1547 con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
1548 if (skip) {
1549 /* skip this message */
1550 dout("alloc_msg said skip message\n");
1551 BUG_ON(con->in_msg);
1552 con->in_base_pos = -front_len - middle_len - data_len -
1553 sizeof(m->footer);
1554 con->in_tag = CEPH_MSGR_TAG_READY;
1555 con->in_seq++;
1556 return 0;
1557 }
1558 if (!con->in_msg) {
1559 con->error_msg =
1560 "error allocating memory for incoming message";
1561 return -ENOMEM;
1562 }
1563 m = con->in_msg;
1564 m->front.iov_len = 0; /* haven't read it yet */
1565 if (m->middle)
1566 m->middle->vec.iov_len = 0;
1567
1568 con->in_msg_pos.page = 0;
1569 if (m->pages)
1570 con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
1571 else
1572 con->in_msg_pos.page_pos = 0;
1573 con->in_msg_pos.data_pos = 0;
1574 }
1575
1576 /* front */
1577 ret = read_partial_message_section(con, &m->front, front_len,
1578 &con->in_front_crc);
1579 if (ret <= 0)
1580 return ret;
1581
1582 /* middle */
1583 if (m->middle) {
1584 ret = read_partial_message_section(con, &m->middle->vec,
1585 middle_len,
1586 &con->in_middle_crc);
1587 if (ret <= 0)
1588 return ret;
1589 }
1590#ifdef CONFIG_BLOCK
1591 if (m->bio && !m->bio_iter)
1592 init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
1593#endif
1594
1595 /* (page) data */
1596 while (con->in_msg_pos.data_pos < data_len) {
1597 if (m->pages) {
1598 ret = read_partial_message_pages(con, m->pages,
1599 data_len, datacrc);
1600 if (ret <= 0)
1601 return ret;
1602#ifdef CONFIG_BLOCK
1603 } else if (m->bio) {
1604
1605 ret = read_partial_message_bio(con,
1606 &m->bio_iter, &m->bio_seg,
1607 data_len, datacrc);
1608 if (ret <= 0)
1609 return ret;
1610#endif
1611 } else {
1612 BUG_ON(1);
1613 }
1614 }
1615
1616 /* footer */
1617 to = sizeof(m->hdr) + sizeof(m->footer);
1618 while (con->in_base_pos < to) {
1619 left = to - con->in_base_pos;
1620 ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
1621 (con->in_base_pos - sizeof(m->hdr)),
1622 left);
1623 if (ret <= 0)
1624 return ret;
1625 con->in_base_pos += ret;
1626 }
1627 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
1628 m, front_len, m->footer.front_crc, middle_len,
1629 m->footer.middle_crc, data_len, m->footer.data_crc);
1630
1631 /* crc ok? */
1632 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
1633 pr_err("read_partial_message %p front crc %u != exp. %u\n",
1634 m, con->in_front_crc, le32_to_cpu(m->footer.front_crc));
1635 return -EBADMSG;
1636 }
1637 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
1638 pr_err("read_partial_message %p middle crc %u != exp %u\n",
1639 m, con->in_middle_crc, le32_to_cpu(m->footer.middle_crc));
1640 return -EBADMSG;
1641 }
1642 if (datacrc &&
1643 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
1644 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
1645 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
1646 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
1647 return -EBADMSG;
1648 }
1649
1650 return 1; /* done! */
1651}
1652
1653/*
1654 * Process message. This happens in the worker thread. The callback should
1655 * be careful not to do anything that waits on other incoming messages or it
1656 * may deadlock.
1657 */
1658static void process_message(struct ceph_connection *con)
1659{
1660 struct ceph_msg *msg;
1661
1662 msg = con->in_msg;
1663 con->in_msg = NULL;
1664
1665 /* if first message, set peer_name */
1666 if (con->peer_name.type == 0)
1667 con->peer_name = msg->hdr.src;
1668
1669 con->in_seq++;
1670 mutex_unlock(&con->mutex);
1671
1672 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
1673 msg, le64_to_cpu(msg->hdr.seq),
1674 ENTITY_NAME(msg->hdr.src),
1675 le16_to_cpu(msg->hdr.type),
1676 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
1677 le32_to_cpu(msg->hdr.front_len),
1678 le32_to_cpu(msg->hdr.data_len),
1679 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
1680 con->ops->dispatch(con, msg);
1681
1682 mutex_lock(&con->mutex);
1683 prepare_read_tag(con);
1684}
1685
1686
1687/*
1688 * Write something to the socket. Called in a worker thread when the
1689 * socket appears to be writeable and we have something ready to send.
1690 */
1691static int try_write(struct ceph_connection *con)
1692{
1693 struct ceph_messenger *msgr = con->msgr;
1694 int ret = 1;
1695
1696 dout("try_write start %p state %lu nref %d\n", con, con->state,
1697 atomic_read(&con->nref));
1698
1699more:
1700 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1701
1702 /* open the socket first? */
1703 if (con->sock == NULL) {
1704 /*
1705 * if we were STANDBY and are reconnecting _this_
1706 * connection, bump connect_seq now. Always bump
1707 * global_seq.
1708 */
1709 if (test_and_clear_bit(STANDBY, &con->state))
1710 con->connect_seq++;
1711
1712 prepare_write_banner(msgr, con);
1713 prepare_write_connect(msgr, con, 1);
1714 prepare_read_banner(con);
1715 set_bit(CONNECTING, &con->state);
1716 clear_bit(NEGOTIATING, &con->state);
1717
1718 BUG_ON(con->in_msg);
1719 con->in_tag = CEPH_MSGR_TAG_READY;
1720 dout("try_write initiating connect on %p new state %lu\n",
1721 con, con->state);
1722 con->sock = ceph_tcp_connect(con);
1723 if (IS_ERR(con->sock)) {
1724 con->sock = NULL;
1725 con->error_msg = "connect error";
1726 ret = -1;
1727 goto out;
1728 }
1729 }
1730
1731more_kvec:
1732 /* kvec data queued? */
1733 if (con->out_skip) {
1734 ret = write_partial_skip(con);
1735 if (ret <= 0) {
1736 if (ret < 0)
1737 dout("try_write write_partial_skip err %d\n", ret);
1738 goto done;
1739 }
1741 }
1742 if (con->out_kvec_left) {
1743 ret = write_partial_kvec(con);
1744 if (ret <= 0)
1745 goto done;
1746 }
1747
1748 /* msg pages? */
1749 if (con->out_msg) {
1750 if (con->out_msg_done) {
1751 ceph_msg_put(con->out_msg);
1752 con->out_msg = NULL; /* we're done with this one */
1753 goto do_next;
1754 }
1755
1756 ret = write_partial_msg_pages(con);
1757 if (ret == 1)
1758 goto more_kvec; /* we need to send the footer, too! */
1759 if (ret == 0)
1760 goto done;
1761 if (ret < 0) {
1762 dout("try_write write_partial_msg_pages err %d\n",
1763 ret);
1764 goto done;
1765 }
1766 }
1767
1768do_next:
1769 if (!test_bit(CONNECTING, &con->state)) {
1770 /* is anything else pending? */
1771 if (!list_empty(&con->out_queue)) {
1772 prepare_write_message(con);
1773 goto more;
1774 }
1775 if (con->in_seq > con->in_seq_acked) {
1776 prepare_write_ack(con);
1777 goto more;
1778 }
1779 if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
1780 prepare_write_keepalive(con);
1781 goto more;
1782 }
1783 }
1784
1785 /* Nothing to do! */
1786 clear_bit(WRITE_PENDING, &con->state);
1787 dout("try_write nothing else to write.\n");
1788done:
1789 ret = 0;
1790out:
1791 dout("try_write done on %p\n", con);
1792 return ret;
1793}
1794
1795
1796
1797/*
1798 * Read what we can from the socket.
1799 */
1800static int try_read(struct ceph_connection *con)
1801{
1802 int ret = -1;
1803
1804 if (!con->sock)
1805 return 0;
1806
1807 if (test_bit(STANDBY, &con->state))
1808 return 0;
1809
1810 dout("try_read start on %p\n", con);
1811
1812more:
1813 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
1814 con->in_base_pos);
1815 if (test_bit(CONNECTING, &con->state)) {
1816 if (!test_bit(NEGOTIATING, &con->state)) {
1817 dout("try_read connecting\n");
1818 ret = read_partial_banner(con);
1819 if (ret <= 0)
1820 goto done;
1821 if (process_banner(con) < 0) {
1822 ret = -1;
1823 goto out;
1824 }
1825 }
1826 ret = read_partial_connect(con);
1827 if (ret <= 0)
1828 goto done;
1829 if (process_connect(con) < 0) {
1830 ret = -1;
1831 goto out;
1832 }
1833 goto more;
1834 }
1835
1836 if (con->in_base_pos < 0) {
1837 /*
1838 * skipping + discarding content.
1839 *
1840 * FIXME: there must be a better way to do this!
1841 */
1842 static char buf[1024];
1843 int skip = min(1024, -con->in_base_pos);
1844 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
1845 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
1846 if (ret <= 0)
1847 goto done;
1848 con->in_base_pos += ret;
1849 if (con->in_base_pos)
1850 goto more;
1851 }
1852 if (con->in_tag == CEPH_MSGR_TAG_READY) {
1853 /*
1854 * what's next?
1855 */
1856 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
1857 if (ret <= 0)
1858 goto done;
1859 dout("try_read got tag %d\n", (int)con->in_tag);
1860 switch (con->in_tag) {
1861 case CEPH_MSGR_TAG_MSG:
1862 prepare_read_message(con);
1863 break;
1864 case CEPH_MSGR_TAG_ACK:
1865 prepare_read_ack(con);
1866 break;
1867 case CEPH_MSGR_TAG_CLOSE:
1868 set_bit(CLOSED, &con->state); /* fixme */
1869 goto done;
1870 default:
1871 goto bad_tag;
1872 }
1873 }
1874 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
1875 ret = read_partial_message(con);
1876 if (ret <= 0) {
1877 switch (ret) {
1878 case -EBADMSG:
1879 con->error_msg = "bad crc";
1880 ret = -EIO;
1881 goto out;
1882 case -EIO:
1883 con->error_msg = "io error";
1884 goto out;
1885 default:
1886 goto done;
1887 }
1888 }
1889 if (con->in_tag == CEPH_MSGR_TAG_READY)
1890 goto more;
1891 process_message(con);
1892 goto more;
1893 }
1894 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
1895 ret = read_partial_ack(con);
1896 if (ret <= 0)
1897 goto done;
1898 process_ack(con);
1899 goto more;
1900 }
1901
1902done:
1903 ret = 0;
1904out:
1905 dout("try_read done on %p\n", con);
1906 return ret;
1907
1908bad_tag:
1909 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
1910 con->error_msg = "protocol error, garbage tag";
1911 ret = -1;
1912 goto out;
1913}
1914
1915
1916/*
1917 * Atomically queue work on a connection. Bump @con reference to
1918 * avoid races with connection teardown.
1919 *
1920 * There is some trickery going on with QUEUED and BUSY because we
1921 * only want a _single_ thread operating on each connection at any
1922 * point in time, but we want to use all available CPUs.
1923 *
1924 * The worker thread only proceeds if it can atomically set BUSY. It
1925 * clears QUEUED and does its thing.  When it thinks it's done, it
1926 * clears BUSY, then rechecks QUEUED; if it's set again, it loops
1927 * (tries again to set BUSY).
1928 *
1929 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
1930 * try to queue work.  If that fails (the work is already queued, or
1931 * BUSY is set), we give up but leave QUEUED set so that the worker
1932 * thread will loop if necessary.
1933 */
1934static void queue_con(struct ceph_connection *con)
1935{
1936 if (test_bit(DEAD, &con->state)) {
1937 dout("queue_con %p ignoring: DEAD\n",
1938 con);
1939 return;
1940 }
1941
1942 if (!con->ops->get(con)) {
1943 dout("queue_con %p ref count 0\n", con);
1944 return;
1945 }
1946
1947 set_bit(QUEUED, &con->state);
1948 if (test_bit(BUSY, &con->state)) {
1949 dout("queue_con %p - already BUSY\n", con);
1950 con->ops->put(con);
1951 } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
1952 dout("queue_con %p - already queued\n", con);
1953 con->ops->put(con);
1954 } else {
1955 dout("queue_con %p\n", con);
1956 }
1957}
1958
1959/*
1960 * Do some work on a connection. Drop a connection ref when we're done.
1961 */
1962static void con_work(struct work_struct *work)
1963{
1964 struct ceph_connection *con = container_of(work, struct ceph_connection,
1965 work.work);
1966 int backoff = 0;
1967
1968more:
1969 if (test_and_set_bit(BUSY, &con->state) != 0) {
1970 dout("con_work %p BUSY already set\n", con);
1971 goto out;
1972 }
1973 dout("con_work %p start, clearing QUEUED\n", con);
1974 clear_bit(QUEUED, &con->state);
1975
1976 mutex_lock(&con->mutex);
1977
1978 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
1979 dout("con_work CLOSED\n");
1980 con_close_socket(con);
1981 goto done;
1982 }
1983 if (test_and_clear_bit(OPENING, &con->state)) {
1984 /* reopen w/ new peer */
1985 dout("con_work OPENING\n");
1986 con_close_socket(con);
1987 }
1988
1989 if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
1990 try_read(con) < 0 ||
1991 try_write(con) < 0) {
1992 mutex_unlock(&con->mutex);
1993 backoff = 1;
1994 ceph_fault(con); /* error/fault path */
1995 goto done_unlocked;
1996 }
1997
1998done:
1999 mutex_unlock(&con->mutex);
2000
2001done_unlocked:
2002 clear_bit(BUSY, &con->state);
2003 dout("con->state=%lu\n", con->state);
2004 if (test_bit(QUEUED, &con->state)) {
2005 if (!backoff || test_bit(OPENING, &con->state)) {
2006 dout("con_work %p QUEUED reset, looping\n", con);
2007 goto more;
2008 }
2009 dout("con_work %p QUEUED reset, but just faulted\n", con);
2010 clear_bit(QUEUED, &con->state);
2011 }
2012 dout("con_work %p done\n", con);
2013
2014out:
2015 con->ops->put(con);
2016}
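
Taken together, queue_con() and con_work() implement the single-worker handshake described above; in outline (a sketch, not the exact code):

	/*
	 * queue_con():                     con_work():
	 *   set QUEUED                       if (test_and_set(BUSY)) return;
	 *   if (BUSY || already queued)    again:
	 *       put ref; worker will loop     clear QUEUED
	 *   else queue_work()                 ...read/write...
	 *                                     clear BUSY
	 *                                     if (QUEUED) goto again;
	 *
	 * Because QUEUED is set before BUSY is tested, either queue_work()
	 * succeeds or the worker is guaranteed to see QUEUED after it
	 * clears BUSY, and loops.
	 */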
2017
2018
2019/*
2020 * Generic error/fault handler.  A retry mechanism is used with
2021 * exponential backoff.
2022 */
2023static void ceph_fault(struct ceph_connection *con)
2024{
2025 pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2026 pr_addr(&con->peer_addr.in_addr), con->error_msg);
2027 dout("fault %p state %lu to peer %s\n",
2028 con, con->state, pr_addr(&con->peer_addr.in_addr));
2029
2030 if (test_bit(LOSSYTX, &con->state)) {
2031 dout("fault on LOSSYTX channel\n");
2032 goto out;
2033 }
2034
2035 mutex_lock(&con->mutex);
2036 if (test_bit(CLOSED, &con->state))
2037 goto out_unlock;
2038
2039 con_close_socket(con);
2040
2041 if (con->in_msg) {
2042 ceph_msg_put(con->in_msg);
2043 con->in_msg = NULL;
2044 }
2045
2046 /* Requeue anything that hasn't been acked */
2047 list_splice_init(&con->out_sent, &con->out_queue);
2048
2049 /* If there are no messages in the queue, place the connection
2050 * in a STANDBY state (i.e., don't try to reconnect just yet). */
2051 if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
2052 dout("fault setting STANDBY\n");
2053 set_bit(STANDBY, &con->state);
2054 } else {
2055 /* retry after a delay. */
2056 if (con->delay == 0)
2057 con->delay = BASE_DELAY_INTERVAL;
2058 else if (con->delay < MAX_DELAY_INTERVAL)
2059 con->delay *= 2;
2060 dout("fault queueing %p delay %lu\n", con, con->delay);
2061 con->ops->get(con);
2062 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2063 round_jiffies_relative(con->delay)) == 0)
2064 con->ops->put(con);
2065 }
2066
2067out_unlock:
2068 mutex_unlock(&con->mutex);
2069out:
2070 /*
2071 * in case we faulted due to authentication, invalidate our
2072 * current tickets so that we can get new ones.
2073 */
2074 if (con->auth_retry && con->ops->invalidate_authorizer) {
2075 dout("calling invalidate_authorizer()\n");
2076 con->ops->invalidate_authorizer(con);
2077 }
2078
2079 if (con->ops->fault)
2080 con->ops->fault(con);
2081}
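
The retry delay doubles from BASE_DELAY_INTERVAL (HZ/2, i.e. half a second) and stops doubling once it reaches MAX_DELAY_INTERVAL (five minutes). As a standalone sketch of that schedule (next_fault_delay() is a hypothetical helper, not part of this file):

	static unsigned long next_fault_delay(unsigned long delay)
	{
		if (delay == 0)
			return BASE_DELAY_INTERVAL;	/* HZ/2: 0.5s */
		if (delay < MAX_DELAY_INTERVAL)
			return delay * 2;		/* 1s, 2s, 4s, ... */
		return delay;		/* stop doubling past ~5 min */
	}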
2082
2083
2084
2085/*
2086 * create a new messenger instance
2087 */
2088struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
2089{
2090 struct ceph_messenger *msgr;
2091
2092 msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
2093 if (msgr == NULL)
2094 return ERR_PTR(-ENOMEM);
2095
2096 spin_lock_init(&msgr->global_seq_lock);
2097
2098 /* the zero page is needed if a request is "canceled" while the message
2099 * is being written over the socket */
2100 msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
2101 if (!msgr->zero_page) {
2102 kfree(msgr);
2103 return ERR_PTR(-ENOMEM);
2104 }
2105 kmap(msgr->zero_page);
2106
2107 if (myaddr)
2108 msgr->inst.addr = *myaddr;
2109
2110 /* select a random nonce */
2111 msgr->inst.addr.type = 0;
2112 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2113 encode_my_addr(msgr);
2114
2115 dout("messenger_create %p\n", msgr);
2116 return msgr;
2117}
2118
2119void ceph_messenger_destroy(struct ceph_messenger *msgr)
2120{
2121 dout("destroy %p\n", msgr);
2122 kunmap(msgr->zero_page);
2123 __free_page(msgr->zero_page);
2124 kfree(msgr);
2125 dout("destroyed messenger %p\n", msgr);
2126}
2127
2128/*
2129 * Queue up an outgoing message on the given connection.
2130 */
2131void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2132{
2133 if (test_bit(CLOSED, &con->state)) {
2134 dout("con_send %p closed, dropping %p\n", con, msg);
2135 ceph_msg_put(msg);
2136 return;
2137 }
2138
2139 /* set src+dst */
2140 msg->hdr.src = con->msgr->inst.name;
2141
2142 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2143
2144 msg->needs_out_seq = true;
2145
2146 /* queue */
2147 mutex_lock(&con->mutex);
2148 BUG_ON(!list_empty(&msg->list_head));
2149 list_add_tail(&msg->list_head, &con->out_queue);
2150 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
2151 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
2152 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2153 le32_to_cpu(msg->hdr.front_len),
2154 le32_to_cpu(msg->hdr.middle_len),
2155 le32_to_cpu(msg->hdr.data_len));
2156 mutex_unlock(&con->mutex);
2157
2158 /* if there wasn't anything waiting to send before, queue
2159 * new work */
2160 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2161 queue_con(con);
2162}
2163
2164/*
2165 * Revoke a message that was previously queued for send
2166 */
2167void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
2168{
2169 mutex_lock(&con->mutex);
2170 if (!list_empty(&msg->list_head)) {
2171 dout("con_revoke %p msg %p - was on queue\n", con, msg);
2172 list_del_init(&msg->list_head);
2173 ceph_msg_put(msg);
2174 msg->hdr.seq = 0;
2175 }
2176 if (con->out_msg == msg) {
2177 dout("con_revoke %p msg %p - was sending\n", con, msg);
2178 con->out_msg = NULL;
2179 if (con->out_kvec_is_msg) {
2180 con->out_skip = con->out_kvec_bytes;
2181 con->out_kvec_is_msg = false;
2182 }
2183 ceph_msg_put(msg);
2184 msg->hdr.seq = 0;
2185 }
2186 mutex_unlock(&con->mutex);
2187}
2188
2189/*
2190 * Revoke a message that we may be reading data into
2191 */
2192void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2193{
2194 mutex_lock(&con->mutex);
2195 if (con->in_msg && con->in_msg == msg) {
2196 unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
2197 unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
2198 unsigned data_len = le32_to_cpu(con->in_hdr.data_len);
2199
2200 /* skip rest of message */
2201 dout("con_revoke_pages %p msg %p revoked\n", con, msg);
2202 con->in_base_pos = con->in_base_pos -
2203 sizeof(struct ceph_msg_header) -
2204 front_len -
2205 middle_len -
2206 data_len -
2207 sizeof(struct ceph_msg_footer);
2208 ceph_msg_put(con->in_msg);
2209 con->in_msg = NULL;
2210 con->in_tag = CEPH_MSGR_TAG_READY;
2211 con->in_seq++;
2212 } else {
2213 dout("con_revoke_pages %p msg %p pages %p no-op\n",
2214 con, con->in_msg, msg);
2215 }
2216 mutex_unlock(&con->mutex);
2217}
2218
2219/*
2220 * Queue a keepalive byte to ensure the tcp connection is alive.
2221 */
2222void ceph_con_keepalive(struct ceph_connection *con)
2223{
2224 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
2225 test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2226 queue_con(con);
2227}
2228
2229
2230/*
2231 * Construct a new message with the given type and front-section size.
2232 * The new msg has a ref count of 1.
2233 */
2234struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
2235{
2236 struct ceph_msg *m;
2237
2238 m = kmalloc(sizeof(*m), flags);
2239 if (m == NULL)
2240 goto out;
2241 kref_init(&m->kref);
2242 INIT_LIST_HEAD(&m->list_head);
2243
2244 m->hdr.tid = 0;
2245 m->hdr.type = cpu_to_le16(type);
2246 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
2247 m->hdr.version = 0;
2248 m->hdr.front_len = cpu_to_le32(front_len);
2249 m->hdr.middle_len = 0;
2250 m->hdr.data_len = 0;
2251 m->hdr.data_off = 0;
2252 m->hdr.reserved = 0;
2253 m->footer.front_crc = 0;
2254 m->footer.middle_crc = 0;
2255 m->footer.data_crc = 0;
2256 m->footer.flags = 0;
2257 m->front_max = front_len;
2258 m->front_is_vmalloc = false;
2259 m->more_to_follow = false;
2260 m->pool = NULL;
2261
2262 /* front */
2263 if (front_len) {
2264 if (front_len > PAGE_CACHE_SIZE) {
2265 m->front.iov_base = __vmalloc(front_len, flags,
2266 PAGE_KERNEL);
2267 m->front_is_vmalloc = true;
2268 } else {
2269 m->front.iov_base = kmalloc(front_len, flags);
2270 }
2271 if (m->front.iov_base == NULL) {
2272 pr_err("msg_new can't allocate %d bytes\n",
2273 front_len);
2274 goto out2;
2275 }
2276 } else {
2277 m->front.iov_base = NULL;
2278 }
2279 m->front.iov_len = front_len;
2280
2281 /* middle */
2282 m->middle = NULL;
2283
2284 /* data */
2285 m->nr_pages = 0;
2286 m->pages = NULL;
2287 m->pagelist = NULL;
2288 m->bio = NULL;
2289 m->bio_iter = NULL;
2290 m->bio_seg = 0;
2291 m->trail = NULL;
2292
2293 dout("ceph_msg_new %p front %d\n", m, front_len);
2294 return m;
2295
2296out2:
2297 ceph_msg_put(m);
2298out:
2299 pr_err("msg_new can't create type %d front %d\n", type, front_len);
2300 return NULL;
2301}
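
A minimal allocation-and-send sketch (the message type and front size mirror the statfs path elsewhere in this tree; treat them as illustrative). Note that ceph_con_send() consumes the caller's reference:

	struct ceph_msg *m;

	m = ceph_msg_new(CEPH_MSG_STATFS, sizeof(struct ceph_mon_statfs),
			 GFP_NOFS);
	if (!m)
		return -ENOMEM;
	/* ...fill in m->front.iov_base... */
	ceph_con_send(con, m);		/* connection now owns our ref */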
2302
2303/*
2304 * Allocate "middle" portion of a message, if it is needed and wasn't
2305 * allocated by alloc_msg. This allows us to read a small fixed-size
2306 * per-type header in the front and then gracefully fail (i.e.,
2307 * propagate the error to the caller based on info in the front) when
2308 * the middle is too large.
2309 */
2310static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
2311{
2312 int type = le16_to_cpu(msg->hdr.type);
2313 int middle_len = le32_to_cpu(msg->hdr.middle_len);
2314
2315 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
2316 ceph_msg_type_name(type), middle_len);
2317 BUG_ON(!middle_len);
2318 BUG_ON(msg->middle);
2319
2320 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
2321 if (!msg->middle)
2322 return -ENOMEM;
2323 return 0;
2324}
2325
2326/*
2327 * Generic message allocator, for incoming messages.
2328 */
2329static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
2330 struct ceph_msg_header *hdr,
2331 int *skip)
2332{
2333 int type = le16_to_cpu(hdr->type);
2334 int front_len = le32_to_cpu(hdr->front_len);
2335 int middle_len = le32_to_cpu(hdr->middle_len);
2336 struct ceph_msg *msg = NULL;
2337 int ret;
2338
2339 if (con->ops->alloc_msg) {
2340 mutex_unlock(&con->mutex);
2341 msg = con->ops->alloc_msg(con, hdr, skip);
2342 mutex_lock(&con->mutex);
2343 if (!msg || *skip)
2344 return NULL;
2345 }
2346 if (!msg) {
2347 *skip = 0;
2348 msg = ceph_msg_new(type, front_len, GFP_NOFS);
2349 if (!msg) {
2350 pr_err("unable to allocate msg type %d len %d\n",
2351 type, front_len);
2352 return NULL;
2353 }
2354 }
2355 memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
2356
2357 if (middle_len && !msg->middle) {
2358 ret = ceph_alloc_middle(con, msg);
2359 if (ret < 0) {
2360 ceph_msg_put(msg);
2361 return NULL;
2362 }
2363 }
2364
2365 return msg;
2366}
2367
2368
2369/*
2370 * Free a generically kmalloc'd message.
2371 */
2372void ceph_msg_kfree(struct ceph_msg *m)
2373{
2374 dout("msg_kfree %p\n", m);
2375 if (m->front_is_vmalloc)
2376 vfree(m->front.iov_base);
2377 else
2378 kfree(m->front.iov_base);
2379 kfree(m);
2380}
2381
2382/*
2383 * Drop a msg ref. Destroy as needed.
2384 */
2385void ceph_msg_last_put(struct kref *kref)
2386{
2387 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2388
2389 dout("ceph_msg_put last one on %p\n", m);
2390 WARN_ON(!list_empty(&m->list_head));
2391
2392 /* drop middle, data, if any */
2393 if (m->middle) {
2394 ceph_buffer_put(m->middle);
2395 m->middle = NULL;
2396 }
2397 m->nr_pages = 0;
2398 m->pages = NULL;
2399
2400 if (m->pagelist) {
2401 ceph_pagelist_release(m->pagelist);
2402 kfree(m->pagelist);
2403 m->pagelist = NULL;
2404 }
2405
2406 m->trail = NULL;
2407
2408 if (m->pool)
2409 ceph_msgpool_put(m->pool, m);
2410 else
2411 ceph_msg_kfree(m);
2412}
2413
2414void ceph_msg_dump(struct ceph_msg *msg)
2415{
2416 pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
2417 msg->front_max, msg->nr_pages);
2418 print_hex_dump(KERN_DEBUG, "header: ",
2419 DUMP_PREFIX_OFFSET, 16, 1,
2420 &msg->hdr, sizeof(msg->hdr), true);
2421 print_hex_dump(KERN_DEBUG, " front: ",
2422 DUMP_PREFIX_OFFSET, 16, 1,
2423 msg->front.iov_base, msg->front.iov_len, true);
2424 if (msg->middle)
2425 print_hex_dump(KERN_DEBUG, "middle: ",
2426 DUMP_PREFIX_OFFSET, 16, 1,
2427 msg->middle->vec.iov_base,
2428 msg->middle->vec.iov_len, true);
2429 print_hex_dump(KERN_DEBUG, "footer: ",
2430 DUMP_PREFIX_OFFSET, 16, 1,
2431 &msg->footer, sizeof(msg->footer), true);
2432}
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
deleted file mode 100644
index 5a79450604ef..000000000000
--- a/fs/ceph/messenger.h
+++ /dev/null
@@ -1,257 +0,0 @@
1#ifndef __FS_CEPH_MESSENGER_H
2#define __FS_CEPH_MESSENGER_H
3
4#include <linux/kref.h>
5#include <linux/mutex.h>
6#include <linux/net.h>
7#include <linux/radix-tree.h>
8#include <linux/uio.h>
9#include <linux/version.h>
10#include <linux/workqueue.h>
11
12#include "types.h"
13#include "buffer.h"
14
15struct ceph_msg;
16struct ceph_connection;
17
18extern struct workqueue_struct *ceph_msgr_wq; /* receive work queue */
19
20/*
21 * Ceph defines these callbacks for handling connection events.
22 */
23struct ceph_connection_operations {
24 struct ceph_connection *(*get)(struct ceph_connection *);
25 void (*put)(struct ceph_connection *);
26
27 /* handle an incoming message. */
28 void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m);
29
30 /* authorize an outgoing connection */
31 int (*get_authorizer) (struct ceph_connection *con,
32 void **buf, int *len, int *proto,
33 void **reply_buf, int *reply_len, int force_new);
34 int (*verify_authorizer_reply) (struct ceph_connection *con, int len);
35 int (*invalidate_authorizer)(struct ceph_connection *con);
36
37 /* protocol version mismatch */
38 void (*bad_proto) (struct ceph_connection *con);
39
40 /* there was some error on the socket (disconnect, whatever) */
41 void (*fault) (struct ceph_connection *con);
42
43 /* a remote host has terminated a message exchange session, and messages
44 * we sent (or they tried to send us) may be lost. */
45 void (*peer_reset) (struct ceph_connection *con);
46
47 struct ceph_msg * (*alloc_msg) (struct ceph_connection *con,
48 struct ceph_msg_header *hdr,
49 int *skip);
50};
51
52/* use format string %s%d */
53#define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num)
54
55struct ceph_messenger {
56 struct ceph_entity_inst inst; /* my name+address */
57 struct ceph_entity_addr my_enc_addr;
58 struct page *zero_page; /* used in certain error cases */
59
60 bool nocrc;
61
62 /*
63 * the global_seq counts connections I (attempt to) initiate
64 * in order to disambiguate certain connect race conditions.
65 */
66 u32 global_seq;
67 spinlock_t global_seq_lock;
68};
69
70/*
71 * a single message. it contains a header (src, dest, message type, etc.),
72 * footer (crc values, mainly), a "front" message body, and possibly a
73 * data payload (stored in some number of pages).
74 */
75struct ceph_msg {
76 struct ceph_msg_header hdr; /* header */
77 struct ceph_msg_footer footer; /* footer */
78 struct kvec front; /* unaligned blobs of message */
79 struct ceph_buffer *middle;
80 struct page **pages; /* data payload. NOT OWNER. */
81 unsigned nr_pages; /* size of page array */
82 struct ceph_pagelist *pagelist; /* instead of pages */
83 struct list_head list_head;
84 struct kref kref;
85 struct bio *bio; /* instead of pages/pagelist */
86 struct bio *bio_iter; /* bio iterator */
87 int bio_seg; /* current bio segment */
88 struct ceph_pagelist *trail; /* the trailing part of the data */
89 bool front_is_vmalloc;
90 bool more_to_follow;
91 bool needs_out_seq;
92 int front_max;
93
94 struct ceph_msgpool *pool;
95};
96
97struct ceph_msg_pos {
98 int page, page_pos; /* which page; offset in page */
99 int data_pos; /* offset in data payload */
100 int did_page_crc; /* true if we've calculated crc for current page */
101};
102
103/* ceph connection fault delay defaults, for exponential backoff */
104#define BASE_DELAY_INTERVAL (HZ/2)
105#define MAX_DELAY_INTERVAL (5 * 60 * HZ)
106
107/*
108 * ceph_connection state bit flags
109 *
110 * QUEUED and BUSY are used together to ensure that only a single
111 * thread is currently opening, reading or writing data to the socket.
112 */
113#define LOSSYTX 0 /* we can close channel or drop messages on errors */
114#define CONNECTING 1
115#define NEGOTIATING 2
116#define KEEPALIVE_PENDING 3
117#define WRITE_PENDING 4 /* we have data ready to send */
118#define QUEUED 5 /* there is work queued on this connection */
119#define BUSY 6 /* work is being done */
120#define STANDBY 8 /* no outgoing messages, socket closed. we keep
121 * the ceph_connection around to maintain shared
122 * state with the peer. */
123#define CLOSED 10 /* we've closed the connection */
124#define SOCK_CLOSED 11 /* socket state changed to closed */
125#define OPENING 13 /* open connection w/ (possibly new) peer */
126#define DEAD 14 /* dead, about to kfree */
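A simplified sketch of how the QUEUED/BUSY pair serializes socket work (illustrative only; the real logic in messenger.c also handles backoff, races on the final QUEUED check, and reference counting):

        if (test_and_set_bit(BUSY, &con->state)) {
                /* another thread owns the socket; leave a note for it */
                set_bit(QUEUED, &con->state);
                return;
        }
        do {
                clear_bit(QUEUED, &con->state);
                /* ... open the socket, then read/write as needed ... */
        } while (test_bit(QUEUED, &con->state));
        clear_bit(BUSY, &con->state);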
127
128/*
129 * A single connection with another host.
130 *
131 * We maintain a queue of outgoing messages, and some session state to
132 * ensure that we can preserve the lossless, ordered delivery of
133 * messages in the case of a TCP disconnect.
134 */
135struct ceph_connection {
136 void *private;
137 atomic_t nref;
138
139 const struct ceph_connection_operations *ops;
140
141 struct ceph_messenger *msgr;
142 struct socket *sock;
143 unsigned long state; /* connection state (see flags above) */
144 const char *error_msg; /* error message, if any */
145
146 struct ceph_entity_addr peer_addr; /* peer address */
147 struct ceph_entity_name peer_name; /* peer name */
148 struct ceph_entity_addr peer_addr_for_me;
149 unsigned peer_features;
150	u32 connect_seq; /* identify the most recent connection
151	 attempt for this session (client side) */
152 u32 peer_global_seq; /* peer's global seq for this connection */
153
154 int auth_retry; /* true if we need a newer authorizer */
155 void *auth_reply_buf; /* where to put the authorizer reply */
156 int auth_reply_buf_len;
157
158 struct mutex mutex;
159
160 /* out queue */
161 struct list_head out_queue;
162 struct list_head out_sent; /* sending or sent but unacked */
163 u64 out_seq; /* last message queued for send */
164 bool out_keepalive_pending;
165
166 u64 in_seq, in_seq_acked; /* last message received, acked */
167
168 /* connection negotiation temps */
169 char in_banner[CEPH_BANNER_MAX_LEN];
170 union {
171 struct { /* outgoing connection */
172 struct ceph_msg_connect out_connect;
173 struct ceph_msg_connect_reply in_reply;
174 };
175 struct { /* incoming */
176 struct ceph_msg_connect in_connect;
177 struct ceph_msg_connect_reply out_reply;
178 };
179 };
180 struct ceph_entity_addr actual_peer_addr;
181
182 /* message out temps */
183 struct ceph_msg *out_msg; /* sending message (== tail of
184 out_sent) */
185 bool out_msg_done;
186 struct ceph_msg_pos out_msg_pos;
187
188 struct kvec out_kvec[8], /* sending header/footer data */
189 *out_kvec_cur;
190 int out_kvec_left; /* kvec's left in out_kvec */
191 int out_skip; /* skip this many bytes */
192 int out_kvec_bytes; /* total bytes left */
193 bool out_kvec_is_msg; /* kvec refers to out_msg */
194 int out_more; /* there is more data after the kvecs */
195 __le64 out_temp_ack; /* for writing an ack */
196
197 /* message in temps */
198 struct ceph_msg_header in_hdr;
199 struct ceph_msg *in_msg;
200 struct ceph_msg_pos in_msg_pos;
201 u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */
202
203 char in_tag; /* protocol control byte */
204 int in_base_pos; /* bytes read */
205 __le64 in_temp_ack; /* for reading an ack */
206
207 struct delayed_work work; /* send|recv work */
208 unsigned long delay; /* current delay interval */
209};
210
211
212extern const char *pr_addr(const struct sockaddr_storage *ss);
213extern int ceph_parse_ips(const char *c, const char *end,
214 struct ceph_entity_addr *addr,
215 int max_count, int *count);
216
217
218extern int ceph_msgr_init(void);
219extern void ceph_msgr_exit(void);
220extern void ceph_msgr_flush(void);
221
222extern struct ceph_messenger *ceph_messenger_create(
223 struct ceph_entity_addr *myaddr);
224extern void ceph_messenger_destroy(struct ceph_messenger *);
225
226extern void ceph_con_init(struct ceph_messenger *msgr,
227 struct ceph_connection *con);
228extern void ceph_con_open(struct ceph_connection *con,
229 struct ceph_entity_addr *addr);
230extern bool ceph_con_opened(struct ceph_connection *con);
231extern void ceph_con_close(struct ceph_connection *con);
232extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
233extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg);
234extern void ceph_con_revoke_message(struct ceph_connection *con,
235 struct ceph_msg *msg);
236extern void ceph_con_keepalive(struct ceph_connection *con);
237extern struct ceph_connection *ceph_con_get(struct ceph_connection *con);
238extern void ceph_con_put(struct ceph_connection *con);
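Taken together, a typical client follows roughly this lifecycle (a sketch; my_state, my_ops and peer_addr are placeholders -- compare __open_session() in mon_client.c below, which does essentially this):

        struct ceph_connection *con = kzalloc(sizeof(*con), GFP_KERNEL);

        ceph_con_init(msgr, con);
        con->private = my_state;
        con->ops = &my_ops;              /* must provide get/put/dispatch */
        ceph_con_open(con, &peer_addr);  /* starts the handshake */

        ceph_con_send(con, msg);         /* takes over the caller's msg ref */

        ceph_con_close(con);
        ceph_con_put(con);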
239
240extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags);
241extern void ceph_msg_kfree(struct ceph_msg *m);
242
243
244static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
245{
246 kref_get(&msg->kref);
247 return msg;
248}
249extern void ceph_msg_last_put(struct kref *kref);
250static inline void ceph_msg_put(struct ceph_msg *msg)
251{
252 kref_put(&msg->kref, ceph_msg_last_put);
253}
254
255extern void ceph_msg_dump(struct ceph_msg *msg);
256
257#endif
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
deleted file mode 100644
index b2a5a3e4a671..000000000000
--- a/fs/ceph/mon_client.c
+++ /dev/null
@@ -1,1018 +0,0 @@
1#include "ceph_debug.h"
2
3#include <linux/types.h>
4#include <linux/slab.h>
5#include <linux/random.h>
6#include <linux/sched.h>
7
8#include "mon_client.h"
9#include "super.h"
10#include "auth.h"
11#include "decode.h"
12
13/*
14 * Interact with Ceph monitor cluster. Handle requests for new map
15 * versions, and periodically resend as needed. Also implement
16 * statfs() and umount().
17 *
18 * A small cluster of Ceph "monitors" are responsible for managing critical
19 * cluster configuration and state information. An odd number (e.g., 3, 5)
20 * of cmon daemons use a modified version of the Paxos part-time parliament
21 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
22 * list of clients who have mounted the file system.
23 *
24 * We maintain an open, active session with a monitor at all times in order to
25 * receive timely MDSMap updates. We periodically send a keepalive byte on the
26 * TCP socket to ensure we detect a failure. If the connection does break, we
27 * randomly hunt for a new monitor. Once the connection is reestablished, we
28 * resend any outstanding requests.
29 */
30
31static const struct ceph_connection_operations mon_con_ops;
32
33static int __validate_auth(struct ceph_mon_client *monc);
34
35/*
36 * Decode a monmap blob (e.g., during mount).
37 */
38struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
39{
40 struct ceph_monmap *m = NULL;
41 int i, err = -EINVAL;
42 struct ceph_fsid fsid;
43 u32 epoch, num_mon;
44 u16 version;
45 u32 len;
46
47 ceph_decode_32_safe(&p, end, len, bad);
48 ceph_decode_need(&p, end, len, bad);
49
50 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
51
52 ceph_decode_16_safe(&p, end, version, bad);
53
54 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
55 ceph_decode_copy(&p, &fsid, sizeof(fsid));
56 epoch = ceph_decode_32(&p);
57
58 num_mon = ceph_decode_32(&p);
59 ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);
60
61 if (num_mon >= CEPH_MAX_MON)
62 goto bad;
63 m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
64 if (m == NULL)
65 return ERR_PTR(-ENOMEM);
66 m->fsid = fsid;
67 m->epoch = epoch;
68 m->num_mon = num_mon;
69 ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
70 for (i = 0; i < num_mon; i++)
71 ceph_decode_addr(&m->mon_inst[i].addr);
72
73 dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
74 m->num_mon);
75 for (i = 0; i < m->num_mon; i++)
76 dout("monmap_decode mon%d is %s\n", i,
77 pr_addr(&m->mon_inst[i].addr.in_addr));
78 return m;
79
80bad:
81 dout("monmap_decode failed with %d\n", err);
82 kfree(m);
83 return ERR_PTR(err);
84}
85
86/*
87 * return true if *addr is included in the monmap.
88 */
89int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
90{
91 int i;
92
93 for (i = 0; i < m->num_mon; i++)
94 if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
95 return 1;
96 return 0;
97}
98
99/*
100 * Send an auth request.
101 */
102static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
103{
104 monc->pending_auth = 1;
105 monc->m_auth->front.iov_len = len;
106 monc->m_auth->hdr.front_len = cpu_to_le32(len);
107 ceph_con_revoke(monc->con, monc->m_auth);
108 ceph_msg_get(monc->m_auth); /* keep our ref */
109 ceph_con_send(monc->con, monc->m_auth);
110}
111
112/*
113 * Close monitor session, if any.
114 */
115static void __close_session(struct ceph_mon_client *monc)
116{
117 if (monc->con) {
118 dout("__close_session closing mon%d\n", monc->cur_mon);
119 ceph_con_revoke(monc->con, monc->m_auth);
120 ceph_con_close(monc->con);
121 monc->cur_mon = -1;
122 monc->pending_auth = 0;
123 ceph_auth_reset(monc->auth);
124 }
125}
126
127/*
128 * Open a session with a (new) monitor.
129 */
130static int __open_session(struct ceph_mon_client *monc)
131{
132 char r;
133 int ret;
134
135 if (monc->cur_mon < 0) {
136 get_random_bytes(&r, 1);
137 monc->cur_mon = r % monc->monmap->num_mon;
138 dout("open_session num=%d r=%d -> mon%d\n",
139 monc->monmap->num_mon, r, monc->cur_mon);
140 monc->sub_sent = 0;
141 monc->sub_renew_after = jiffies; /* i.e., expired */
142 monc->want_next_osdmap = !!monc->want_next_osdmap;
143
144 dout("open_session mon%d opening\n", monc->cur_mon);
145 monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON;
146 monc->con->peer_name.num = cpu_to_le64(monc->cur_mon);
147 ceph_con_open(monc->con,
148 &monc->monmap->mon_inst[monc->cur_mon].addr);
149
150		/* initiate authentication handshake */
151 ret = ceph_auth_build_hello(monc->auth,
152 monc->m_auth->front.iov_base,
153 monc->m_auth->front_max);
154 __send_prepared_auth_request(monc, ret);
155 } else {
156 dout("open_session mon%d already open\n", monc->cur_mon);
157 }
158 return 0;
159}
160
161static bool __sub_expired(struct ceph_mon_client *monc)
162{
163 return time_after_eq(jiffies, monc->sub_renew_after);
164}
165
166/*
167 * Reschedule delayed work timer.
168 */
169static void __schedule_delayed(struct ceph_mon_client *monc)
170{
171 unsigned delay;
172
173 if (monc->cur_mon < 0 || __sub_expired(monc))
174 delay = 10 * HZ;
175 else
176 delay = 20 * HZ;
177 dout("__schedule_delayed after %u\n", delay);
178 schedule_delayed_work(&monc->delayed_work, delay);
179}
180
181/*
182 * Send subscribe request for mdsmap and/or osdmap.
183 */
184static void __send_subscribe(struct ceph_mon_client *monc)
185{
186 dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n",
187 (unsigned)monc->sub_sent, __sub_expired(monc),
188 monc->want_next_osdmap);
189 if ((__sub_expired(monc) && !monc->sub_sent) ||
190 monc->want_next_osdmap == 1) {
191 struct ceph_msg *msg = monc->m_subscribe;
192 struct ceph_mon_subscribe_item *i;
193 void *p, *end;
194
195 p = msg->front.iov_base;
196 end = p + msg->front_max;
197
198 dout("__send_subscribe to 'mdsmap' %u+\n",
199 (unsigned)monc->have_mdsmap);
200 if (monc->want_next_osdmap) {
201 dout("__send_subscribe to 'osdmap' %u\n",
202 (unsigned)monc->have_osdmap);
203 ceph_encode_32(&p, 3);
204 ceph_encode_string(&p, end, "osdmap", 6);
205 i = p;
206 i->have = cpu_to_le64(monc->have_osdmap);
207 i->onetime = 1;
208 p += sizeof(*i);
209 monc->want_next_osdmap = 2; /* requested */
210 } else {
211 ceph_encode_32(&p, 2);
212 }
213 ceph_encode_string(&p, end, "mdsmap", 6);
214 i = p;
215 i->have = cpu_to_le64(monc->have_mdsmap);
216 i->onetime = 0;
217 p += sizeof(*i);
218 ceph_encode_string(&p, end, "monmap", 6);
219 i = p;
220 i->have = 0;
221 i->onetime = 0;
222 p += sizeof(*i);
223
224 msg->front.iov_len = p - msg->front.iov_base;
225 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
226 ceph_con_revoke(monc->con, msg);
227 ceph_con_send(monc->con, ceph_msg_get(msg));
228
229 monc->sub_sent = jiffies | 1; /* never 0 */
230 }
231}
232
233static void handle_subscribe_ack(struct ceph_mon_client *monc,
234 struct ceph_msg *msg)
235{
236 unsigned seconds;
237 struct ceph_mon_subscribe_ack *h = msg->front.iov_base;
238
239 if (msg->front.iov_len < sizeof(*h))
240 goto bad;
241 seconds = le32_to_cpu(h->duration);
242
243 mutex_lock(&monc->mutex);
244 if (monc->hunting) {
245 pr_info("mon%d %s session established\n",
246 monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr));
247 monc->hunting = false;
248 }
249 dout("handle_subscribe_ack after %d seconds\n", seconds);
250 monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1;
251 monc->sub_sent = 0;
252 mutex_unlock(&monc->mutex);
253 return;
254bad:
255 pr_err("got corrupt subscribe-ack msg\n");
256 ceph_msg_dump(msg);
257}
258
259/*
260 * Keep track of which maps we have
261 */
262int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got)
263{
264 mutex_lock(&monc->mutex);
265 monc->have_mdsmap = got;
266 mutex_unlock(&monc->mutex);
267 return 0;
268}
269
270int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got)
271{
272 mutex_lock(&monc->mutex);
273 monc->have_osdmap = got;
274 monc->want_next_osdmap = 0;
275 mutex_unlock(&monc->mutex);
276 return 0;
277}
278
279/*
280 * Register interest in the next osdmap
281 */
282void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc)
283{
284 dout("request_next_osdmap have %u\n", monc->have_osdmap);
285 mutex_lock(&monc->mutex);
286 if (!monc->want_next_osdmap)
287 monc->want_next_osdmap = 1;
288 if (monc->want_next_osdmap < 2)
289 __send_subscribe(monc);
290 mutex_unlock(&monc->mutex);
291}
292
293/*
294 * Open a session with the monitor cluster, allocating our connection on first use.
295 */
296int ceph_monc_open_session(struct ceph_mon_client *monc)
297{
298 if (!monc->con) {
299 monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL);
300 if (!monc->con)
301 return -ENOMEM;
302 ceph_con_init(monc->client->msgr, monc->con);
303 monc->con->private = monc;
304 monc->con->ops = &mon_con_ops;
305 }
306
307 mutex_lock(&monc->mutex);
308 __open_session(monc);
309 __schedule_delayed(monc);
310 mutex_unlock(&monc->mutex);
311 return 0;
312}
313
314/*
315 * The monitor responds with a mount ack to indicate mount success. The
316 * included client ticket allows the client to talk to MDSs and OSDs.
317 */
318static void ceph_monc_handle_map(struct ceph_mon_client *monc,
319 struct ceph_msg *msg)
320{
321 struct ceph_client *client = monc->client;
322 struct ceph_monmap *monmap = NULL, *old = monc->monmap;
323 void *p, *end;
324
325 mutex_lock(&monc->mutex);
326
327 dout("handle_monmap\n");
328 p = msg->front.iov_base;
329 end = p + msg->front.iov_len;
330
331 monmap = ceph_monmap_decode(p, end);
332 if (IS_ERR(monmap)) {
333 pr_err("problem decoding monmap, %d\n",
334 (int)PTR_ERR(monmap));
335 goto out;
336 }
337
338 if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
339 kfree(monmap);
340 goto out;
341 }
342
343 client->monc.monmap = monmap;
344 kfree(old);
345
346out:
347 mutex_unlock(&monc->mutex);
348 wake_up_all(&client->auth_wq);
349}
350
351/*
352 * generic requests (e.g., statfs, poolop)
353 */
354static struct ceph_mon_generic_request *__lookup_generic_req(
355 struct ceph_mon_client *monc, u64 tid)
356{
357 struct ceph_mon_generic_request *req;
358 struct rb_node *n = monc->generic_request_tree.rb_node;
359
360 while (n) {
361 req = rb_entry(n, struct ceph_mon_generic_request, node);
362 if (tid < req->tid)
363 n = n->rb_left;
364 else if (tid > req->tid)
365 n = n->rb_right;
366 else
367 return req;
368 }
369 return NULL;
370}
371
372static void __insert_generic_request(struct ceph_mon_client *monc,
373 struct ceph_mon_generic_request *new)
374{
375 struct rb_node **p = &monc->generic_request_tree.rb_node;
376 struct rb_node *parent = NULL;
377 struct ceph_mon_generic_request *req = NULL;
378
379 while (*p) {
380 parent = *p;
381 req = rb_entry(parent, struct ceph_mon_generic_request, node);
382 if (new->tid < req->tid)
383 p = &(*p)->rb_left;
384 else if (new->tid > req->tid)
385 p = &(*p)->rb_right;
386 else
387 BUG();
388 }
389
390 rb_link_node(&new->node, parent, p);
391 rb_insert_color(&new->node, &monc->generic_request_tree);
392}
393
394static void release_generic_request(struct kref *kref)
395{
396 struct ceph_mon_generic_request *req =
397 container_of(kref, struct ceph_mon_generic_request, kref);
398
399 if (req->reply)
400 ceph_msg_put(req->reply);
401 if (req->request)
402 ceph_msg_put(req->request);
403
404 kfree(req);
405}
406
407static void put_generic_request(struct ceph_mon_generic_request *req)
408{
409 kref_put(&req->kref, release_generic_request);
410}
411
412static void get_generic_request(struct ceph_mon_generic_request *req)
413{
414 kref_get(&req->kref);
415}
416
417static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
418 struct ceph_msg_header *hdr,
419 int *skip)
420{
421 struct ceph_mon_client *monc = con->private;
422 struct ceph_mon_generic_request *req;
423 u64 tid = le64_to_cpu(hdr->tid);
424 struct ceph_msg *m;
425
426 mutex_lock(&monc->mutex);
427 req = __lookup_generic_req(monc, tid);
428 if (!req) {
429 dout("get_generic_reply %lld dne\n", tid);
430 *skip = 1;
431 m = NULL;
432 } else {
433 dout("get_generic_reply %lld got %p\n", tid, req->reply);
434 m = ceph_msg_get(req->reply);
435 /*
436 * we don't need to track the connection reading into
437 * this reply because we only have one open connection
438 * at a time, ever.
439 */
440 }
441 mutex_unlock(&monc->mutex);
442 return m;
443}
444
445static int do_generic_request(struct ceph_mon_client *monc,
446 struct ceph_mon_generic_request *req)
447{
448 int err;
449
450 /* register request */
451 mutex_lock(&monc->mutex);
452 req->tid = ++monc->last_tid;
453 req->request->hdr.tid = cpu_to_le64(req->tid);
454 __insert_generic_request(monc, req);
455 monc->num_generic_requests++;
456 ceph_con_send(monc->con, ceph_msg_get(req->request));
457 mutex_unlock(&monc->mutex);
458
459 err = wait_for_completion_interruptible(&req->completion);
460
461 mutex_lock(&monc->mutex);
462 rb_erase(&req->node, &monc->generic_request_tree);
463 monc->num_generic_requests--;
464 mutex_unlock(&monc->mutex);
465
466 if (!err)
467 err = req->result;
468 return err;
469}
470
471/*
472 * statfs
473 */
474static void handle_statfs_reply(struct ceph_mon_client *monc,
475 struct ceph_msg *msg)
476{
477 struct ceph_mon_generic_request *req;
478 struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
479 u64 tid = le64_to_cpu(msg->hdr.tid);
480
481 if (msg->front.iov_len != sizeof(*reply))
482 goto bad;
483 dout("handle_statfs_reply %p tid %llu\n", msg, tid);
484
485 mutex_lock(&monc->mutex);
486 req = __lookup_generic_req(monc, tid);
487 if (req) {
488 *(struct ceph_statfs *)req->buf = reply->st;
489 req->result = 0;
490 get_generic_request(req);
491 }
492 mutex_unlock(&monc->mutex);
493 if (req) {
494 complete_all(&req->completion);
495 put_generic_request(req);
496 }
497 return;
498
499bad:
500 pr_err("corrupt generic reply, tid %llu\n", tid);
501 ceph_msg_dump(msg);
502}
503
504/*
505 * Do a synchronous statfs().
506 */
507int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
508{
509 struct ceph_mon_generic_request *req;
510 struct ceph_mon_statfs *h;
511 int err;
512
513 req = kzalloc(sizeof(*req), GFP_NOFS);
514 if (!req)
515 return -ENOMEM;
516
517 kref_init(&req->kref);
518 req->buf = buf;
519 req->buf_len = sizeof(*buf);
520 init_completion(&req->completion);
521
522 err = -ENOMEM;
523 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS);
524 if (!req->request)
525 goto out;
526 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS);
527 if (!req->reply)
528 goto out;
529
530 /* fill out request */
531 h = req->request->front.iov_base;
532 h->monhdr.have_version = 0;
533 h->monhdr.session_mon = cpu_to_le16(-1);
534 h->monhdr.session_mon_tid = 0;
535 h->fsid = monc->monmap->fsid;
536
537 err = do_generic_request(monc, req);
538
539out:
540 kref_put(&req->kref, release_generic_request);
541 return err;
542}
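A hypothetical caller, e.g. a superblock ->statfs method (the ceph_statfs fields are little-endian on the wire, so they must be converted; the shift below assumes a 1 MB CEPH_BLOCK):

        struct ceph_statfs st;
        int err = ceph_monc_do_statfs(&client->monc, &st);

        if (err < 0)
                return err;
        buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT - 10);
        buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT - 10);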
543
544/*
545 * pool ops
546 */
547static int get_poolop_reply_buf(const char *src, size_t src_len,
548 char *dst, size_t dst_len)
549{
550 u32 buf_len;
551
552 if (src_len != sizeof(u32) + dst_len)
553 return -EINVAL;
554
555 buf_len = le32_to_cpu(*(u32 *)src);
556 if (buf_len != dst_len)
557 return -EINVAL;
558
559 memcpy(dst, src + sizeof(u32), dst_len);
560 return 0;
561}
562
563static void handle_poolop_reply(struct ceph_mon_client *monc,
564 struct ceph_msg *msg)
565{
566 struct ceph_mon_generic_request *req;
567 struct ceph_mon_poolop_reply *reply = msg->front.iov_base;
568 u64 tid = le64_to_cpu(msg->hdr.tid);
569
570 if (msg->front.iov_len < sizeof(*reply))
571 goto bad;
572 dout("handle_poolop_reply %p tid %llu\n", msg, tid);
573
574 mutex_lock(&monc->mutex);
575 req = __lookup_generic_req(monc, tid);
576 if (req) {
577 if (req->buf_len &&
578 get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply),
579 msg->front.iov_len - sizeof(*reply),
580 req->buf, req->buf_len) < 0) {
581 mutex_unlock(&monc->mutex);
582 goto bad;
583 }
584 req->result = le32_to_cpu(reply->reply_code);
585 get_generic_request(req);
586 }
587 mutex_unlock(&monc->mutex);
588 if (req) {
589 complete(&req->completion);
590 put_generic_request(req);
591 }
592 return;
593
594bad:
595 pr_err("corrupt generic reply, tid %llu\n", tid);
596 ceph_msg_dump(msg);
597}
598
599/*
600 * Do a synchronous pool op.
601 */
602int ceph_monc_do_poolop(struct ceph_mon_client *monc, u32 op,
603 u32 pool, u64 snapid,
604 char *buf, int len)
605{
606 struct ceph_mon_generic_request *req;
607 struct ceph_mon_poolop *h;
608 int err;
609
610 req = kzalloc(sizeof(*req), GFP_NOFS);
611 if (!req)
612 return -ENOMEM;
613
614 kref_init(&req->kref);
615 req->buf = buf;
616 req->buf_len = len;
617 init_completion(&req->completion);
618
619 err = -ENOMEM;
620 req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS);
621 if (!req->request)
622 goto out;
623 req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS);
624 if (!req->reply)
625 goto out;
626
627 /* fill out request */
628 req->request->hdr.version = cpu_to_le16(2);
629 h = req->request->front.iov_base;
630 h->monhdr.have_version = 0;
631 h->monhdr.session_mon = cpu_to_le16(-1);
632 h->monhdr.session_mon_tid = 0;
633 h->fsid = monc->monmap->fsid;
634 h->pool = cpu_to_le32(pool);
635 h->op = cpu_to_le32(op);
636 h->auid = 0;
637 h->snapid = cpu_to_le64(snapid);
638 h->name_len = 0;
639
640 err = do_generic_request(monc, req);
641
642out:
643 kref_put(&req->kref, release_generic_request);
644 return err;
645}
646
647int ceph_monc_create_snapid(struct ceph_mon_client *monc,
648 u32 pool, u64 *snapid)
649{
650 return ceph_monc_do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP,
651 pool, 0, (char *)snapid, sizeof(*snapid));
652
653}
654
655int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
656 u32 pool, u64 snapid)
657{
658	return ceph_monc_do_poolop(monc, POOL_OP_DELETE_UNMANAGED_SNAP,
659 pool, snapid, 0, 0);
660
661}
662
663/*
664 * Resend pending generic requests.
665 */
666static void __resend_generic_request(struct ceph_mon_client *monc)
667{
668 struct ceph_mon_generic_request *req;
669 struct rb_node *p;
670
671 for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
672 req = rb_entry(p, struct ceph_mon_generic_request, node);
673 ceph_con_revoke(monc->con, req->request);
674 ceph_con_send(monc->con, ceph_msg_get(req->request));
675 }
676}
677
678/*
679 * Delayed work. If we haven't mounted yet, retry. Otherwise,
680 * renew/retry subscription as needed (in case it is timing out, or we
681 * got an ENOMEM). And keep the monitor connection alive.
682 */
683static void delayed_work(struct work_struct *work)
684{
685 struct ceph_mon_client *monc =
686 container_of(work, struct ceph_mon_client, delayed_work.work);
687
688 dout("monc delayed_work\n");
689 mutex_lock(&monc->mutex);
690 if (monc->hunting) {
691 __close_session(monc);
692 __open_session(monc); /* continue hunting */
693 } else {
694 ceph_con_keepalive(monc->con);
695
696 __validate_auth(monc);
697
698 if (monc->auth->ops->is_authenticated(monc->auth))
699 __send_subscribe(monc);
700 }
701 __schedule_delayed(monc);
702 mutex_unlock(&monc->mutex);
703}
704
705/*
706 * On startup, we build a temporary monmap populated with the IPs
707 * provided by mount(2).
708 */
709static int build_initial_monmap(struct ceph_mon_client *monc)
710{
711 struct ceph_mount_args *args = monc->client->mount_args;
712 struct ceph_entity_addr *mon_addr = args->mon_addr;
713 int num_mon = args->num_mon;
714 int i;
715
716 /* build initial monmap */
717 monc->monmap = kzalloc(sizeof(*monc->monmap) +
718 num_mon*sizeof(monc->monmap->mon_inst[0]),
719 GFP_KERNEL);
720 if (!monc->monmap)
721 return -ENOMEM;
722 for (i = 0; i < num_mon; i++) {
723 monc->monmap->mon_inst[i].addr = mon_addr[i];
724 monc->monmap->mon_inst[i].addr.nonce = 0;
725 monc->monmap->mon_inst[i].name.type =
726 CEPH_ENTITY_TYPE_MON;
727 monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
728 }
729 monc->monmap->num_mon = num_mon;
730 monc->have_fsid = false;
731
732 /* release addr memory */
733 kfree(args->mon_addr);
734 args->mon_addr = NULL;
735 args->num_mon = 0;
736 return 0;
737}
738
739int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
740{
741 int err = 0;
742
743 dout("init\n");
744 memset(monc, 0, sizeof(*monc));
745 monc->client = cl;
746 monc->monmap = NULL;
747 mutex_init(&monc->mutex);
748
749 err = build_initial_monmap(monc);
750 if (err)
751 goto out;
752
753 monc->con = NULL;
754
755 /* authentication */
756 monc->auth = ceph_auth_init(cl->mount_args->name,
757 cl->mount_args->secret);
758 if (IS_ERR(monc->auth))
759 return PTR_ERR(monc->auth);
760 monc->auth->want_keys =
761 CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
762 CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;
763
764 /* msgs */
765 err = -ENOMEM;
766 monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
767 sizeof(struct ceph_mon_subscribe_ack),
768 GFP_NOFS);
769 if (!monc->m_subscribe_ack)
770 goto out_monmap;
771
772 monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, GFP_NOFS);
773 if (!monc->m_subscribe)
774 goto out_subscribe_ack;
775
776 monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS);
777 if (!monc->m_auth_reply)
778 goto out_subscribe;
779
780 monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS);
781 monc->pending_auth = 0;
782 if (!monc->m_auth)
783 goto out_auth_reply;
784
785 monc->cur_mon = -1;
786 monc->hunting = true;
787 monc->sub_renew_after = jiffies;
788 monc->sub_sent = 0;
789
790 INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
791 monc->generic_request_tree = RB_ROOT;
792 monc->num_generic_requests = 0;
793 monc->last_tid = 0;
794
795 monc->have_mdsmap = 0;
796 monc->have_osdmap = 0;
797 monc->want_next_osdmap = 1;
798 return 0;
799
800out_auth_reply:
801 ceph_msg_put(monc->m_auth_reply);
802out_subscribe:
803 ceph_msg_put(monc->m_subscribe);
804out_subscribe_ack:
805 ceph_msg_put(monc->m_subscribe_ack);
806out_monmap:
807 kfree(monc->monmap);
808out:
809 return err;
810}
811
812void ceph_monc_stop(struct ceph_mon_client *monc)
813{
814 dout("stop\n");
815 cancel_delayed_work_sync(&monc->delayed_work);
816
817 mutex_lock(&monc->mutex);
818 __close_session(monc);
819 if (monc->con) {
820 monc->con->private = NULL;
821 monc->con->ops->put(monc->con);
822 monc->con = NULL;
823 }
824 mutex_unlock(&monc->mutex);
825
826 ceph_auth_destroy(monc->auth);
827
828 ceph_msg_put(monc->m_auth);
829 ceph_msg_put(monc->m_auth_reply);
830 ceph_msg_put(monc->m_subscribe);
831 ceph_msg_put(monc->m_subscribe_ack);
832
833 kfree(monc->monmap);
834}
835
836static void handle_auth_reply(struct ceph_mon_client *monc,
837 struct ceph_msg *msg)
838{
839 int ret;
840 int was_auth = 0;
841
842 mutex_lock(&monc->mutex);
843 if (monc->auth->ops)
844 was_auth = monc->auth->ops->is_authenticated(monc->auth);
845 monc->pending_auth = 0;
846 ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
847 msg->front.iov_len,
848 monc->m_auth->front.iov_base,
849 monc->m_auth->front_max);
850 if (ret < 0) {
851 monc->client->auth_err = ret;
852 wake_up_all(&monc->client->auth_wq);
853 } else if (ret > 0) {
854 __send_prepared_auth_request(monc, ret);
855 } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
856 dout("authenticated, starting session\n");
857
858 monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
859 monc->client->msgr->inst.name.num =
860 cpu_to_le64(monc->auth->global_id);
861
862 __send_subscribe(monc);
863 __resend_generic_request(monc);
864 }
865 mutex_unlock(&monc->mutex);
866}
867
868static int __validate_auth(struct ceph_mon_client *monc)
869{
870 int ret;
871
872 if (monc->pending_auth)
873 return 0;
874
875 ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
876 monc->m_auth->front_max);
877 if (ret <= 0)
878 return ret; /* either an error, or no need to authenticate */
879 __send_prepared_auth_request(monc, ret);
880 return 0;
881}
882
883int ceph_monc_validate_auth(struct ceph_mon_client *monc)
884{
885 int ret;
886
887 mutex_lock(&monc->mutex);
888 ret = __validate_auth(monc);
889 mutex_unlock(&monc->mutex);
890 return ret;
891}
892
893/*
894 * handle incoming message
895 */
896static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
897{
898 struct ceph_mon_client *monc = con->private;
899 int type = le16_to_cpu(msg->hdr.type);
900
901 if (!monc)
902 return;
903
904 switch (type) {
905 case CEPH_MSG_AUTH_REPLY:
906 handle_auth_reply(monc, msg);
907 break;
908
909 case CEPH_MSG_MON_SUBSCRIBE_ACK:
910 handle_subscribe_ack(monc, msg);
911 break;
912
913 case CEPH_MSG_STATFS_REPLY:
914 handle_statfs_reply(monc, msg);
915 break;
916
917 case CEPH_MSG_POOLOP_REPLY:
918 handle_poolop_reply(monc, msg);
919 break;
920
921 case CEPH_MSG_MON_MAP:
922 ceph_monc_handle_map(monc, msg);
923 break;
924
925 case CEPH_MSG_MDS_MAP:
926 ceph_mdsc_handle_map(&monc->client->mdsc, msg);
927 break;
928
929 case CEPH_MSG_OSD_MAP:
930 ceph_osdc_handle_map(&monc->client->osdc, msg);
931 break;
932
933 default:
934 pr_err("received unknown message type %d %s\n", type,
935 ceph_msg_type_name(type));
936 }
937 ceph_msg_put(msg);
938}
939
940/*
941 * Allocate memory for incoming message
942 */
943static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
944 struct ceph_msg_header *hdr,
945 int *skip)
946{
947 struct ceph_mon_client *monc = con->private;
948 int type = le16_to_cpu(hdr->type);
949 int front_len = le32_to_cpu(hdr->front_len);
950 struct ceph_msg *m = NULL;
951
952 *skip = 0;
953
954 switch (type) {
955 case CEPH_MSG_MON_SUBSCRIBE_ACK:
956 m = ceph_msg_get(monc->m_subscribe_ack);
957 break;
958 case CEPH_MSG_POOLOP_REPLY:
959 case CEPH_MSG_STATFS_REPLY:
960 return get_generic_reply(con, hdr, skip);
961 case CEPH_MSG_AUTH_REPLY:
962 m = ceph_msg_get(monc->m_auth_reply);
963 break;
964 case CEPH_MSG_MON_MAP:
965 case CEPH_MSG_MDS_MAP:
966 case CEPH_MSG_OSD_MAP:
967 m = ceph_msg_new(type, front_len, GFP_NOFS);
968 break;
969 }
970
971 if (!m) {
972 pr_info("alloc_msg unknown type %d\n", type);
973 *skip = 1;
974 }
975 return m;
976}
977
978/*
979 * If the monitor connection resets, pick a new monitor and resubmit
980 * any pending requests.
981 */
982static void mon_fault(struct ceph_connection *con)
983{
984 struct ceph_mon_client *monc = con->private;
985
986 if (!monc)
987 return;
988
989 dout("mon_fault\n");
990 mutex_lock(&monc->mutex);
991 if (!con->private)
992 goto out;
993
994 if (monc->con && !monc->hunting)
995 pr_info("mon%d %s session lost, "
996 "hunting for new mon\n", monc->cur_mon,
997 pr_addr(&monc->con->peer_addr.in_addr));
998
999 __close_session(monc);
1000 if (!monc->hunting) {
1001 /* start hunting */
1002 monc->hunting = true;
1003 __open_session(monc);
1004 } else {
1005 /* already hunting, let's wait a bit */
1006 __schedule_delayed(monc);
1007 }
1008out:
1009 mutex_unlock(&monc->mutex);
1010}
1011
1012static const struct ceph_connection_operations mon_con_ops = {
1013 .get = ceph_con_get,
1014 .put = ceph_con_put,
1015 .dispatch = dispatch,
1016 .fault = mon_fault,
1017 .alloc_msg = mon_alloc_msg,
1018};
diff --git a/fs/ceph/mon_client.h b/fs/ceph/mon_client.h
deleted file mode 100644
index 8e396f2c0963..000000000000
--- a/fs/ceph/mon_client.h
+++ /dev/null
@@ -1,121 +0,0 @@
1#ifndef _FS_CEPH_MON_CLIENT_H
2#define _FS_CEPH_MON_CLIENT_H
3
4#include <linux/completion.h>
5#include <linux/kref.h>
6#include <linux/rbtree.h>
7
8#include "messenger.h"
9
10struct ceph_client;
11struct ceph_mount_args;
12struct ceph_auth_client;
13
14/*
15 * The monitor map enumerates the set of all monitors.
16 */
17struct ceph_monmap {
18 struct ceph_fsid fsid;
19 u32 epoch;
20 u32 num_mon;
21 struct ceph_entity_inst mon_inst[0];
22};
23
24struct ceph_mon_client;
25struct ceph_mon_generic_request;
26
27
28/*
29 * Generic mechanism for resending monitor requests.
30 */
31typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc,
32 int newmon);
33
34/* a pending monitor request */
35struct ceph_mon_request {
36 struct ceph_mon_client *monc;
37 struct delayed_work delayed_work;
38 unsigned long delay;
39 ceph_monc_request_func_t do_request;
40};
41
42/*
43 * ceph_mon_generic_request is used for the statfs and poolop requests,
44 * which are handled a bit differently because we need to get data back
45 * to the caller
46 */
47struct ceph_mon_generic_request {
48 struct kref kref;
49 u64 tid;
50 struct rb_node node;
51 int result;
52 void *buf;
53 int buf_len;
54 struct completion completion;
55 struct ceph_msg *request; /* original request */
56 struct ceph_msg *reply; /* and reply */
57};
58
59struct ceph_mon_client {
60 struct ceph_client *client;
61 struct ceph_monmap *monmap;
62
63 struct mutex mutex;
64 struct delayed_work delayed_work;
65
66 struct ceph_auth_client *auth;
67 struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe, *m_subscribe_ack;
68 int pending_auth;
69
70 bool hunting;
71	int cur_mon; /* last monitor I contacted */
72 unsigned long sub_sent, sub_renew_after;
73 struct ceph_connection *con;
74 bool have_fsid;
75
76 /* pending generic requests */
77 struct rb_root generic_request_tree;
78 int num_generic_requests;
79 u64 last_tid;
80
81 /* mds/osd map */
82 int want_next_osdmap; /* 1 = want, 2 = want+asked */
83 u32 have_osdmap, have_mdsmap;
84
85#ifdef CONFIG_DEBUG_FS
86 struct dentry *debugfs_file;
87#endif
88};
89
90extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end);
91extern int ceph_monmap_contains(struct ceph_monmap *m,
92 struct ceph_entity_addr *addr);
93
94extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl);
95extern void ceph_monc_stop(struct ceph_mon_client *monc);
96
97/*
98 * The model here is to indicate that we need a new map of at least
99 * epoch @want, and also call in when we receive a map. We will
100 * periodically rerequest the map from the monitor cluster until we
101 * get what we want.
102 */
103extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have);
104extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have);
105
106extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc);
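Under this model a map consumer does something like the following (a sketch; the real call sites live in the osdmap/mdsmap handlers):

        /* we suspect our osdmap is stale; ask for the next epoch */
        ceph_monc_request_next_osdmap(&client->monc);

        /* later, in the CEPH_MSG_OSD_MAP handler, after applying epoch e */
        ceph_monc_got_osdmap(&client->monc, e);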
107
108extern int ceph_monc_do_statfs(struct ceph_mon_client *monc,
109 struct ceph_statfs *buf);
110
111extern int ceph_monc_open_session(struct ceph_mon_client *monc);
112
113extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
114
115extern int ceph_monc_create_snapid(struct ceph_mon_client *monc,
116 u32 pool, u64 *snapid);
117
118extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
119 u32 pool, u64 snapid);
120
121#endif
diff --git a/fs/ceph/msgpool.c b/fs/ceph/msgpool.c
deleted file mode 100644
index dd65a6438131..000000000000
--- a/fs/ceph/msgpool.c
+++ /dev/null
@@ -1,64 +0,0 @@
1#include "ceph_debug.h"
2
3#include <linux/err.h>
4#include <linux/sched.h>
5#include <linux/types.h>
6#include <linux/vmalloc.h>
7
8#include "msgpool.h"
9
10static void *alloc_fn(gfp_t gfp_mask, void *arg)
11{
12 struct ceph_msgpool *pool = arg;
13 void *p;
14
15 p = ceph_msg_new(0, pool->front_len, gfp_mask);
16 if (!p)
17 pr_err("msgpool %s alloc failed\n", pool->name);
18 return p;
19}
20
21static void free_fn(void *element, void *arg)
22{
23 ceph_msg_put(element);
24}
25
26int ceph_msgpool_init(struct ceph_msgpool *pool,
27 int front_len, int size, bool blocking, const char *name)
28{
29 pool->front_len = front_len;
30 pool->pool = mempool_create(size, alloc_fn, free_fn, pool);
31 if (!pool->pool)
32 return -ENOMEM;
33 pool->name = name;
34 return 0;
35}
36
37void ceph_msgpool_destroy(struct ceph_msgpool *pool)
38{
39 mempool_destroy(pool->pool);
40}
41
42struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *pool,
43 int front_len)
44{
45 if (front_len > pool->front_len) {
46 pr_err("msgpool_get pool %s need front %d, pool size is %d\n",
47 pool->name, front_len, pool->front_len);
48 WARN_ON(1);
49
50 /* try to alloc a fresh message */
51 return ceph_msg_new(0, front_len, GFP_NOFS);
52 }
53
54 return mempool_alloc(pool->pool, GFP_NOFS);
55}
56
57void ceph_msgpool_put(struct ceph_msgpool *pool, struct ceph_msg *msg)
58{
59 /* reset msg front_len; user may have changed it */
60 msg->front.iov_len = pool->front_len;
61 msg->hdr.front_len = cpu_to_le32(pool->front_len);
62
63 kref_init(&msg->kref); /* retake single ref */
64}
diff --git a/fs/ceph/msgpool.h b/fs/ceph/msgpool.h
deleted file mode 100644
index a362605f9368..000000000000
--- a/fs/ceph/msgpool.h
+++ /dev/null
@@ -1,25 +0,0 @@
1#ifndef _FS_CEPH_MSGPOOL
2#define _FS_CEPH_MSGPOOL
3
4#include <linux/mempool.h>
5#include "messenger.h"
6
7/*
8 * we use memory pools for preallocating messages we may receive, to
9 * avoid unexpected OOM conditions.
10 */
11struct ceph_msgpool {
12 const char *name;
13 mempool_t *pool;
14 int front_len; /* preallocated payload size */
15};
16
17extern int ceph_msgpool_init(struct ceph_msgpool *pool,
18 int front_len, int size, bool blocking,
19 const char *name);
20extern void ceph_msgpool_destroy(struct ceph_msgpool *pool);
21extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *,
22 int front_len);
23extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *);
24
25#endif
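For reference, a minimal usage sketch (sizes are illustrative; note that the blocking argument is accepted but not consulted by the implementation above):

        struct ceph_msgpool pool;
        struct ceph_msg *msg;

        if (ceph_msgpool_init(&pool, 512, 8, true, "example") < 0)
                return -ENOMEM;

        msg = ceph_msgpool_get(&pool, 0);   /* 0: use preallocated front_len */
        /* a request larger than pool->front_len falls back to a fresh
         * ceph_msg_new() allocation, with a WARN, as shown above */

        ceph_msgpool_destroy(&pool);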
diff --git a/fs/ceph/msgr.h b/fs/ceph/msgr.h
deleted file mode 100644
index 680d3d648cac..000000000000
--- a/fs/ceph/msgr.h
+++ /dev/null
@@ -1,175 +0,0 @@
1#ifndef CEPH_MSGR_H
2#define CEPH_MSGR_H
3
4/*
5 * Data types for message passing layer used by Ceph.
6 */
7
8#define CEPH_MON_PORT 6789 /* default monitor port */
9
10/*
11 * client-side processes will try to bind to ports in this
12 * range, simply for the benefit of tools like nmap or wireshark
13 * that would like to identify the protocol.
14 */
15#define CEPH_PORT_FIRST 6789
16#define CEPH_PORT_START 6800 /* non-monitors start here */
17#define CEPH_PORT_LAST 6900
18
19/*
20 * TCP connection banner. It includes a protocol version and must be
21 * adjusted whenever the wire protocol changes. Try to keep this string
22 * length constant.
23 */
24#define CEPH_BANNER "ceph v027"
25#define CEPH_BANNER_MAX_LEN 30
26
27
28/*
29 * Rollover-safe type and comparator for 32-bit sequence numbers.
30 * Comparator returns -1, 0, or 1.
31 */
32typedef __u32 ceph_seq_t;
33
34static inline __s32 ceph_seq_cmp(__u32 a, __u32 b)
35{
36 return (__s32)a - (__s32)b;
37}
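The cast to a signed 32-bit difference keeps comparisons correct across wraparound; for example:

        ceph_seq_cmp(0x00000002, 0xfffffffe) ==  4    /* 2 sorts "after", across the wrap */
        ceph_seq_cmp(5, 5)                   ==  0
        ceph_seq_cmp(3, 9)                   == -6    /* 3 sorts "before" 9 */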
38
39
40/*
41 * entity_name -- logical name for a process participating in the
42 * network, e.g. 'mds0' or 'osd3'.
43 */
44struct ceph_entity_name {
45 __u8 type; /* CEPH_ENTITY_TYPE_* */
46 __le64 num;
47} __attribute__ ((packed));
48
49#define CEPH_ENTITY_TYPE_MON 0x01
50#define CEPH_ENTITY_TYPE_MDS 0x02
51#define CEPH_ENTITY_TYPE_OSD 0x04
52#define CEPH_ENTITY_TYPE_CLIENT 0x08
53#define CEPH_ENTITY_TYPE_AUTH 0x20
54
55#define CEPH_ENTITY_TYPE_ANY 0xFF
56
57extern const char *ceph_entity_type_name(int type);
58
59/*
60 * entity_addr -- network address
61 */
62struct ceph_entity_addr {
63 __le32 type;
64 __le32 nonce; /* unique id for process (e.g. pid) */
65 struct sockaddr_storage in_addr;
66} __attribute__ ((packed));
67
68struct ceph_entity_inst {
69 struct ceph_entity_name name;
70 struct ceph_entity_addr addr;
71} __attribute__ ((packed));
72
73
74/* used by message exchange protocol */
75#define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */
76#define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */
77#define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing
78 incoming connection */
79#define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again
80 with higher cseq */
81#define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again
82 with higher gseq */
83#define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */
84#define CEPH_MSGR_TAG_MSG 7 /* message */
85#define CEPH_MSGR_TAG_ACK 8 /* message ack */
86#define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */
87#define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */
88#define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */
89#define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */
90
91
92/*
93 * connection negotiation
94 */
95struct ceph_msg_connect {
96 __le64 features; /* supported feature bits */
97 __le32 host_type; /* CEPH_ENTITY_TYPE_* */
98 __le32 global_seq; /* count connections initiated by this host */
99 __le32 connect_seq; /* count connections initiated in this session */
100 __le32 protocol_version;
101 __le32 authorizer_protocol;
102 __le32 authorizer_len;
103 __u8 flags; /* CEPH_MSG_CONNECT_* */
104} __attribute__ ((packed));
105
106struct ceph_msg_connect_reply {
107 __u8 tag;
108 __le64 features; /* feature bits for this session */
109 __le32 global_seq;
110 __le32 connect_seq;
111 __le32 protocol_version;
112 __le32 authorizer_len;
113 __u8 flags;
114} __attribute__ ((packed));
115
116#define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */
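Pieced together, the negotiation these types describe runs roughly as follows (a simplified outline; messenger.c implements the details):

        initiator                                acceptor
        ---------                                --------
        banner "ceph v027"         ----->
                                   <-----        banner + advertised addresses
        struct ceph_msg_connect
          (gseq, cseq, authorizer) ----->
                                   <-----        struct ceph_msg_connect_reply,
                                                 tag = READY, RETRY_SESSION,
                                                 RETRY_GLOBAL, WAIT,
                                                 BADAUTHORIZER, ...
        then a tagged stream: TAG_MSG + message, TAG_ACK + seq, TAG_KEEPALIVE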
117
118
119/*
120 * message header
121 */
122struct ceph_msg_header_old {
123 __le64 seq; /* message seq# for this session */
124 __le64 tid; /* transaction id */
125 __le16 type; /* message type */
126 __le16 priority; /* priority. higher value == higher priority */
127 __le16 version; /* version of message encoding */
128
129 __le32 front_len; /* bytes in main payload */
130 __le32 middle_len;/* bytes in middle payload */
131 __le32 data_len; /* bytes of data payload */
132 __le16 data_off; /* sender: include full offset;
133 receiver: mask against ~PAGE_MASK */
134
135 struct ceph_entity_inst src, orig_src;
136 __le32 reserved;
137 __le32 crc; /* header crc32c */
138} __attribute__ ((packed));
139
140struct ceph_msg_header {
141 __le64 seq; /* message seq# for this session */
142 __le64 tid; /* transaction id */
143 __le16 type; /* message type */
144 __le16 priority; /* priority. higher value == higher priority */
145 __le16 version; /* version of message encoding */
146
147 __le32 front_len; /* bytes in main payload */
148 __le32 middle_len;/* bytes in middle payload */
149 __le32 data_len; /* bytes of data payload */
150 __le16 data_off; /* sender: include full offset;
151 receiver: mask against ~PAGE_MASK */
152
153 struct ceph_entity_name src;
154 __le32 reserved;
155 __le32 crc; /* header crc32c */
156} __attribute__ ((packed));
157
158#define CEPH_MSG_PRIO_LOW 64
159#define CEPH_MSG_PRIO_DEFAULT 127
160#define CEPH_MSG_PRIO_HIGH 196
161#define CEPH_MSG_PRIO_HIGHEST 255
162
163/*
164 * follows data payload
165 */
166struct ceph_msg_footer {
167 __le32 front_crc, middle_crc, data_crc;
168 __u8 flags;
169} __attribute__ ((packed));
170
171#define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */
172#define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */
173
174
175#endif
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
deleted file mode 100644
index 0edb43f1ef26..000000000000
--- a/fs/ceph/osd_client.c
+++ /dev/null
@@ -1,1771 +0,0 @@
1#include "ceph_debug.h"
2
3#include <linux/err.h>
4#include <linux/highmem.h>
5#include <linux/mm.h>
6#include <linux/pagemap.h>
7#include <linux/slab.h>
8#include <linux/uaccess.h>
9#ifdef CONFIG_BLOCK
10#include <linux/bio.h>
11#endif
12
13#include "super.h"
14#include "osd_client.h"
15#include "messenger.h"
16#include "decode.h"
17#include "auth.h"
18#include "pagelist.h"
19
20#define OSD_OP_FRONT_LEN 4096
21#define OSD_OPREPLY_FRONT_LEN 512
22
23static const struct ceph_connection_operations osd_con_ops;
24static int __kick_requests(struct ceph_osd_client *osdc,
25 struct ceph_osd *kickosd);
26
27static void kick_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd);
28
29static int op_needs_trail(int op)
30{
31 switch (op) {
32 case CEPH_OSD_OP_GETXATTR:
33 case CEPH_OSD_OP_SETXATTR:
34 case CEPH_OSD_OP_CMPXATTR:
35 case CEPH_OSD_OP_CALL:
36 return 1;
37 default:
38 return 0;
39 }
40}
41
42static int op_has_extent(int op)
43{
44 return (op == CEPH_OSD_OP_READ ||
45 op == CEPH_OSD_OP_WRITE);
46}
47
48void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
49 struct ceph_file_layout *layout,
50 u64 snapid,
51 u64 off, u64 *plen, u64 *bno,
52 struct ceph_osd_request *req,
53 struct ceph_osd_req_op *op)
54{
55 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
56 u64 orig_len = *plen;
57 u64 objoff, objlen; /* extent in object */
58
59 reqhead->snapid = cpu_to_le64(snapid);
60
61 /* object extent? */
62 ceph_calc_file_object_mapping(layout, off, plen, bno,
63 &objoff, &objlen);
64 if (*plen < orig_len)
65 dout(" skipping last %llu, final file extent %llu~%llu\n",
66 orig_len - *plen, off, *plen);
67
68 if (op_has_extent(op->op)) {
69 op->extent.offset = objoff;
70 op->extent.length = objlen;
71 }
72 req->r_num_pages = calc_pages_for(off, *plen);
73
74 dout("calc_layout bno=%llx %llu~%llu (%d pages)\n",
75 *bno, objoff, objlen, req->r_num_pages);
76
77}
78
79/*
80 * Implement client access to distributed object storage cluster.
81 *
82 * All data objects are stored within a cluster/cloud of OSDs, or
83 * "object storage devices." (Note that Ceph OSDs have _nothing_ to
84 * do with the T10 OSD extensions to SCSI.) Ceph OSDs are simply
85 * remote daemons serving up and coordinating consistent and safe
86 * access to storage.
87 *
88 * Cluster membership and the mapping of data objects onto storage devices
89 * are described by the osd map.
90 *
91 * We keep track of pending OSD requests (read, write), resubmit
92 * requests to different OSDs when the cluster topology/data layout
93 * change, or retry the affected requests when the communications
94 * channel with an OSD is reset.
95 */
96
97/*
98 * calculate the mapping of a file extent onto an object, and fill out the
99 * request accordingly. shorten extent as necessary if it crosses an
100 * object boundary.
101 *
102 * fill osd op in request message.
103 */
104static void calc_layout(struct ceph_osd_client *osdc,
105 struct ceph_vino vino,
106 struct ceph_file_layout *layout,
107 u64 off, u64 *plen,
108 struct ceph_osd_request *req,
109 struct ceph_osd_req_op *op)
110{
111 u64 bno;
112
113 ceph_calc_raw_layout(osdc, layout, vino.snap, off,
114 plen, &bno, req, op);
115
116 sprintf(req->r_oid, "%llx.%08llx", vino.ino, bno);
117 req->r_oid_len = strlen(req->r_oid);
118}
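As a concrete illustration of the extent shortening (assuming the default 4 MB object size and simple striping): a 2 MB read at file offset 3 MB lands in object "<ino>.00000000" at objoff = 3 MB, but only objlen = 1 MB fits before the object boundary, so *plen is shortened to 1 MB and the caller must issue a follow-up request against "<ino>.00000001" for the remainder.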
119
120/*
121 * requests
122 */
123void ceph_osdc_release_request(struct kref *kref)
124{
125 struct ceph_osd_request *req = container_of(kref,
126 struct ceph_osd_request,
127 r_kref);
128
129 if (req->r_request)
130 ceph_msg_put(req->r_request);
131 if (req->r_reply)
132 ceph_msg_put(req->r_reply);
133 if (req->r_con_filling_msg) {
134 dout("release_request revoking pages %p from con %p\n",
135 req->r_pages, req->r_con_filling_msg);
136 ceph_con_revoke_message(req->r_con_filling_msg,
137 req->r_reply);
138 ceph_con_put(req->r_con_filling_msg);
139 }
140 if (req->r_own_pages)
141 ceph_release_page_vector(req->r_pages,
142 req->r_num_pages);
143#ifdef CONFIG_BLOCK
144 if (req->r_bio)
145 bio_put(req->r_bio);
146#endif
147 ceph_put_snap_context(req->r_snapc);
148 if (req->r_trail) {
149 ceph_pagelist_release(req->r_trail);
150 kfree(req->r_trail);
151 }
152 if (req->r_mempool)
153 mempool_free(req, req->r_osdc->req_mempool);
154 else
155 kfree(req);
156}
157
171static int get_num_ops(struct ceph_osd_req_op *ops, int *needs_trail)
172{
173 int i = 0;
174
175 if (needs_trail)
176 *needs_trail = 0;
177 while (ops[i].op) {
178 if (needs_trail && op_needs_trail(ops[i].op))
179 *needs_trail = 1;
180 i++;
181 }
182
183 return i;
184}
185
186struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
187 int flags,
188 struct ceph_snap_context *snapc,
189 struct ceph_osd_req_op *ops,
190 bool use_mempool,
191 gfp_t gfp_flags,
192 struct page **pages,
193 struct bio *bio)
194{
195 struct ceph_osd_request *req;
196 struct ceph_msg *msg;
197 int needs_trail;
198 int num_op = get_num_ops(ops, &needs_trail);
199 size_t msg_size = sizeof(struct ceph_osd_request_head);
200
201 msg_size += num_op*sizeof(struct ceph_osd_op);
202
203 if (use_mempool) {
204 req = mempool_alloc(osdc->req_mempool, gfp_flags);
205 memset(req, 0, sizeof(*req));
206 } else {
207 req = kzalloc(sizeof(*req), gfp_flags);
208 }
209 if (req == NULL)
210 return NULL;
211
212 req->r_osdc = osdc;
213 req->r_mempool = use_mempool;
214
215 kref_init(&req->r_kref);
216 init_completion(&req->r_completion);
217 init_completion(&req->r_safe_completion);
218 INIT_LIST_HEAD(&req->r_unsafe_item);
219 req->r_flags = flags;
220
221 WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0);
222
223 /* create reply message */
224 if (use_mempool)
225 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
226 else
227 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY,
228 OSD_OPREPLY_FRONT_LEN, gfp_flags);
229 if (!msg) {
230 ceph_osdc_put_request(req);
231 return NULL;
232 }
233 req->r_reply = msg;
234
235 /* allocate space for the trailing data */
236 if (needs_trail) {
237 req->r_trail = kmalloc(sizeof(struct ceph_pagelist), gfp_flags);
238 if (!req->r_trail) {
239 ceph_osdc_put_request(req);
240 return NULL;
241 }
242 ceph_pagelist_init(req->r_trail);
243 }
244 /* create request message; allow space for oid */
245 msg_size += 40;
246 if (snapc)
247 msg_size += sizeof(u64) * snapc->num_snaps;
248 if (use_mempool)
249 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
250 else
251 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp_flags);
252 if (!msg) {
253 ceph_osdc_put_request(req);
254 return NULL;
255 }
256
257 msg->hdr.type = cpu_to_le16(CEPH_MSG_OSD_OP);
258 memset(msg->front.iov_base, 0, msg->front.iov_len);
259
260 req->r_request = msg;
261 req->r_pages = pages;
262#ifdef CONFIG_BLOCK
263 if (bio) {
264 req->r_bio = bio;
265 bio_get(req->r_bio);
266 }
267#endif
268
269 return req;
270}
271
272static void osd_req_encode_op(struct ceph_osd_request *req,
273 struct ceph_osd_op *dst,
274 struct ceph_osd_req_op *src)
275{
276 dst->op = cpu_to_le16(src->op);
277
278 switch (dst->op) {
279 case CEPH_OSD_OP_READ:
280 case CEPH_OSD_OP_WRITE:
281 dst->extent.offset =
282 cpu_to_le64(src->extent.offset);
283 dst->extent.length =
284 cpu_to_le64(src->extent.length);
285 dst->extent.truncate_size =
286 cpu_to_le64(src->extent.truncate_size);
287 dst->extent.truncate_seq =
288 cpu_to_le32(src->extent.truncate_seq);
289 break;
290
291 case CEPH_OSD_OP_GETXATTR:
292 case CEPH_OSD_OP_SETXATTR:
293 case CEPH_OSD_OP_CMPXATTR:
294 BUG_ON(!req->r_trail);
295
296 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
297 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
298 dst->xattr.cmp_op = src->xattr.cmp_op;
299 dst->xattr.cmp_mode = src->xattr.cmp_mode;
300 ceph_pagelist_append(req->r_trail, src->xattr.name,
301 src->xattr.name_len);
302 ceph_pagelist_append(req->r_trail, src->xattr.val,
303 src->xattr.value_len);
304 break;
305 case CEPH_OSD_OP_CALL:
306 BUG_ON(!req->r_trail);
307
308 dst->cls.class_len = src->cls.class_len;
309 dst->cls.method_len = src->cls.method_len;
310 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
311
312 ceph_pagelist_append(req->r_trail, src->cls.class_name,
313 src->cls.class_len);
314 ceph_pagelist_append(req->r_trail, src->cls.method_name,
315 src->cls.method_len);
316 ceph_pagelist_append(req->r_trail, src->cls.indata,
317 src->cls.indata_len);
318 break;
319 case CEPH_OSD_OP_ROLLBACK:
320 dst->snap.snapid = cpu_to_le64(src->snap.snapid);
321 break;
322 case CEPH_OSD_OP_STARTSYNC:
323 break;
324 default:
325 pr_err("unrecognized osd opcode %d\n", dst->op);
326 WARN_ON(1);
327 break;
328 }
329 dst->payload_len = cpu_to_le32(src->payload_len);
330}
331
332/*
333 * build new request AND message
334 *
335 */
336void ceph_osdc_build_request(struct ceph_osd_request *req,
337 u64 off, u64 *plen,
338 struct ceph_osd_req_op *src_ops,
339 struct ceph_snap_context *snapc,
340 struct timespec *mtime,
341 const char *oid,
342 int oid_len)
343{
344 struct ceph_msg *msg = req->r_request;
345 struct ceph_osd_request_head *head;
346 struct ceph_osd_req_op *src_op;
347 struct ceph_osd_op *op;
348 void *p;
349 int num_op = get_num_ops(src_ops, NULL);
350 size_t msg_size = sizeof(*head) + num_op*sizeof(*op);
351 int flags = req->r_flags;
352 u64 data_len = 0;
353 int i;
354
355 head = msg->front.iov_base;
356 op = (void *)(head + 1);
357 p = (void *)(op + num_op);
358
359 req->r_snapc = ceph_get_snap_context(snapc);
360
361 head->client_inc = cpu_to_le32(1); /* always, for now. */
362 head->flags = cpu_to_le32(flags);
363 if (flags & CEPH_OSD_FLAG_WRITE)
364 ceph_encode_timespec(&head->mtime, mtime);
365 head->num_ops = cpu_to_le16(num_op);
366
367
368 /* fill in oid */
369 head->object_len = cpu_to_le32(oid_len);
370 memcpy(p, oid, oid_len);
371 p += oid_len;
372
373 src_op = src_ops;
374 while (src_op->op) {
375 osd_req_encode_op(req, op, src_op);
376 src_op++;
377 op++;
378 }
379
380 if (req->r_trail)
381 data_len += req->r_trail->length;
382
383 if (snapc) {
384 head->snap_seq = cpu_to_le64(snapc->seq);
385 head->num_snaps = cpu_to_le32(snapc->num_snaps);
386 for (i = 0; i < snapc->num_snaps; i++) {
387 put_unaligned_le64(snapc->snaps[i], p);
388 p += sizeof(u64);
389 }
390 }
391
392 if (flags & CEPH_OSD_FLAG_WRITE) {
393 req->r_request->hdr.data_off = cpu_to_le16(off);
394 req->r_request->hdr.data_len = cpu_to_le32(*plen + data_len);
395 } else if (data_len) {
396 req->r_request->hdr.data_off = 0;
397 req->r_request->hdr.data_len = cpu_to_le32(data_len);
398 }
399
400 BUG_ON(p > msg->front.iov_base + msg->front.iov_len);
401 msg_size = p - msg->front.iov_base;
402 msg->front.iov_len = msg_size;
403 msg->hdr.front_len = cpu_to_le32(msg_size);
404 return;
405}
406
407/*
408 * build new request AND message, calculate layout, and adjust file
409 * extent as needed.
410 *
411 * if the file was recently truncated, we include information about its
412 * old and new size so that the object can be updated appropriately. (we
413 * avoid synchronously deleting truncated objects because it's slow.)
414 *
415 * if @do_sync, include a 'startsync' command so that the osd will flush
416 * data quickly.
417 */
418struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
419 struct ceph_file_layout *layout,
420 struct ceph_vino vino,
421 u64 off, u64 *plen,
422 int opcode, int flags,
423 struct ceph_snap_context *snapc,
424 int do_sync,
425 u32 truncate_seq,
426 u64 truncate_size,
427 struct timespec *mtime,
428 bool use_mempool, int num_reply)
429{
430 struct ceph_osd_req_op ops[3];
431 struct ceph_osd_request *req;
432
433 ops[0].op = opcode;
434 ops[0].extent.truncate_seq = truncate_seq;
435 ops[0].extent.truncate_size = truncate_size;
436 ops[0].payload_len = 0;
437
438 if (do_sync) {
439 ops[1].op = CEPH_OSD_OP_STARTSYNC;
440 ops[1].payload_len = 0;
441 ops[2].op = 0;
442 } else
443 ops[1].op = 0;
444
445 req = ceph_osdc_alloc_request(osdc, flags,
446 snapc, ops,
447 use_mempool,
448 GFP_NOFS, NULL, NULL);
449 if (IS_ERR(req))
450 return req;
451
452 /* calculate max write size */
453 calc_layout(osdc, vino, layout, off, plen, req, ops);
454 req->r_file_layout = *layout; /* keep a copy */
455
456 ceph_osdc_build_request(req, off, plen, ops,
457 snapc,
458 mtime,
459 req->r_oid, req->r_oid_len);
460
461 return req;
462}
463
464/*
465 * We keep osd requests in an rbtree, sorted by ->r_tid.
466 */
467static void __insert_request(struct ceph_osd_client *osdc,
468 struct ceph_osd_request *new)
469{
470 struct rb_node **p = &osdc->requests.rb_node;
471 struct rb_node *parent = NULL;
472 struct ceph_osd_request *req = NULL;
473
474 while (*p) {
475 parent = *p;
476 req = rb_entry(parent, struct ceph_osd_request, r_node);
477 if (new->r_tid < req->r_tid)
478 p = &(*p)->rb_left;
479 else if (new->r_tid > req->r_tid)
480 p = &(*p)->rb_right;
481 else
482 BUG();
483 }
484
485 rb_link_node(&new->r_node, parent, p);
486 rb_insert_color(&new->r_node, &osdc->requests);
487}
488
489static struct ceph_osd_request *__lookup_request(struct ceph_osd_client *osdc,
490 u64 tid)
491{
492 struct ceph_osd_request *req;
493 struct rb_node *n = osdc->requests.rb_node;
494
495 while (n) {
496 req = rb_entry(n, struct ceph_osd_request, r_node);
497 if (tid < req->r_tid)
498 n = n->rb_left;
499 else if (tid > req->r_tid)
500 n = n->rb_right;
501 else
502 return req;
503 }
504 return NULL;
505}
506
507static struct ceph_osd_request *
508__lookup_request_ge(struct ceph_osd_client *osdc,
509 u64 tid)
510{
511 struct ceph_osd_request *req;
512 struct rb_node *n = osdc->requests.rb_node;
513
514 while (n) {
515 req = rb_entry(n, struct ceph_osd_request, r_node);
516 if (tid < req->r_tid) {
517 if (!n->rb_left)
518 return req;
519 n = n->rb_left;
520 } else if (tid > req->r_tid) {
521 n = n->rb_right;
522 } else {
523 return req;
524 }
525 }
526 return NULL;
527}
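
/*
 * A sketch of the traversal pattern __lookup_request_ge() enables (this
 * is how ceph_osdc_sync() below walks requests in tid order without
 * holding a pointer across a sleep):
 *
 *	u64 next_tid = 0;
 *	struct ceph_osd_request *req;
 *
 *	while ((req = __lookup_request_ge(osdc, next_tid)) != NULL) {
 *		next_tid = req->r_tid + 1;
 *		... use req ...
 *	}
 */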
528
529
530/*
531 * If the osd connection drops, we need to resubmit all requests.
532 */
533static void osd_reset(struct ceph_connection *con)
534{
535 struct ceph_osd *osd = con->private;
536 struct ceph_osd_client *osdc;
537
538 if (!osd)
539 return;
540 dout("osd_reset osd%d\n", osd->o_osd);
541 osdc = osd->o_osdc;
542 down_read(&osdc->map_sem);
543 kick_requests(osdc, osd);
544 up_read(&osdc->map_sem);
545}
546
547/*
548 * Track open sessions with osds.
549 */
550static struct ceph_osd *create_osd(struct ceph_osd_client *osdc)
551{
552 struct ceph_osd *osd;
553
554 osd = kzalloc(sizeof(*osd), GFP_NOFS);
555 if (!osd)
556 return NULL;
557
558 atomic_set(&osd->o_ref, 1);
559 osd->o_osdc = osdc;
560 INIT_LIST_HEAD(&osd->o_requests);
561 INIT_LIST_HEAD(&osd->o_osd_lru);
562 osd->o_incarnation = 1;
563
564 ceph_con_init(osdc->client->msgr, &osd->o_con);
565 osd->o_con.private = osd;
566 osd->o_con.ops = &osd_con_ops;
567 osd->o_con.peer_name.type = CEPH_ENTITY_TYPE_OSD;
568
569 INIT_LIST_HEAD(&osd->o_keepalive_item);
570 return osd;
571}
572
573static struct ceph_osd *get_osd(struct ceph_osd *osd)
574{
575 if (atomic_inc_not_zero(&osd->o_ref)) {
576 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
577 atomic_read(&osd->o_ref));
578 return osd;
579 } else {
580 dout("get_osd %p FAIL\n", osd);
581 return NULL;
582 }
583}
584
585static void put_osd(struct ceph_osd *osd)
586{
587 dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
588 atomic_read(&osd->o_ref) - 1);
589 if (atomic_dec_and_test(&osd->o_ref)) {
590 struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
591
592 if (osd->o_authorizer)
593 ac->ops->destroy_authorizer(ac, osd->o_authorizer);
594 kfree(osd);
595 }
596}
597
598/*
599 * remove an osd from our map
600 */
601static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
602{
603 dout("__remove_osd %p\n", osd);
604 BUG_ON(!list_empty(&osd->o_requests));
605 rb_erase(&osd->o_node, &osdc->osds);
606 list_del_init(&osd->o_osd_lru);
607 ceph_con_close(&osd->o_con);
608 put_osd(osd);
609}
610
611static void __move_osd_to_lru(struct ceph_osd_client *osdc,
612 struct ceph_osd *osd)
613{
614 dout("__move_osd_to_lru %p\n", osd);
615 BUG_ON(!list_empty(&osd->o_osd_lru));
616 list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
617 osd->lru_ttl = jiffies + osdc->client->mount_args->osd_idle_ttl * HZ;
618}
619
620static void __remove_osd_from_lru(struct ceph_osd *osd)
621{
622 dout("__remove_osd_from_lru %p\n", osd);
623 if (!list_empty(&osd->o_osd_lru))
624 list_del_init(&osd->o_osd_lru);
625}
626
627static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
628{
629 struct ceph_osd *osd, *nosd;
630
631	dout("remove_old_osds %p\n", osdc);
632 mutex_lock(&osdc->request_mutex);
633 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
634 if (!remove_all && time_before(jiffies, osd->lru_ttl))
635 break;
636 __remove_osd(osdc, osd);
637 }
638 mutex_unlock(&osdc->request_mutex);
639}
640
641/*
642 * reset osd connect
643 */
644static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
645{
646 struct ceph_osd_request *req;
647 int ret = 0;
648
649 dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
650 if (list_empty(&osd->o_requests)) {
651 __remove_osd(osdc, osd);
652 } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
653 &osd->o_con.peer_addr,
654 sizeof(osd->o_con.peer_addr)) == 0 &&
655 !ceph_con_opened(&osd->o_con)) {
656 dout(" osd addr hasn't changed and connection never opened,"
657	     " letting msgr retry\n");
658	/* touch each r_stamp for handle_timeout()'s benefit */
659 list_for_each_entry(req, &osd->o_requests, r_osd_item)
660 req->r_stamp = jiffies;
661 ret = -EAGAIN;
662 } else {
663 ceph_con_close(&osd->o_con);
664 ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
665 osd->o_incarnation++;
666 }
667 return ret;
668}
669
670static void __insert_osd(struct ceph_osd_client *osdc, struct ceph_osd *new)
671{
672 struct rb_node **p = &osdc->osds.rb_node;
673 struct rb_node *parent = NULL;
674 struct ceph_osd *osd = NULL;
675
676 while (*p) {
677 parent = *p;
678 osd = rb_entry(parent, struct ceph_osd, o_node);
679 if (new->o_osd < osd->o_osd)
680 p = &(*p)->rb_left;
681 else if (new->o_osd > osd->o_osd)
682 p = &(*p)->rb_right;
683 else
684 BUG();
685 }
686
687 rb_link_node(&new->o_node, parent, p);
688 rb_insert_color(&new->o_node, &osdc->osds);
689}
690
691static struct ceph_osd *__lookup_osd(struct ceph_osd_client *osdc, int o)
692{
693 struct ceph_osd *osd;
694 struct rb_node *n = osdc->osds.rb_node;
695
696 while (n) {
697 osd = rb_entry(n, struct ceph_osd, o_node);
698 if (o < osd->o_osd)
699 n = n->rb_left;
700 else if (o > osd->o_osd)
701 n = n->rb_right;
702 else
703 return osd;
704 }
705 return NULL;
706}
707
708static void __schedule_osd_timeout(struct ceph_osd_client *osdc)
709{
710 schedule_delayed_work(&osdc->timeout_work,
711 osdc->client->mount_args->osd_keepalive_timeout * HZ);
712}
713
714static void __cancel_osd_timeout(struct ceph_osd_client *osdc)
715{
716 cancel_delayed_work(&osdc->timeout_work);
717}
718
719/*
720 * Register request, assign tid. If this is the first request, set up
721 * the timeout event.
722 */
723static void register_request(struct ceph_osd_client *osdc,
724 struct ceph_osd_request *req)
725{
726 mutex_lock(&osdc->request_mutex);
727 req->r_tid = ++osdc->last_tid;
728 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
729 INIT_LIST_HEAD(&req->r_req_lru_item);
730
731 dout("register_request %p tid %lld\n", req, req->r_tid);
732 __insert_request(osdc, req);
733 ceph_osdc_get_request(req);
734 osdc->num_requests++;
735
736 if (osdc->num_requests == 1) {
737 dout(" first request, scheduling timeout\n");
738 __schedule_osd_timeout(osdc);
739 }
740 mutex_unlock(&osdc->request_mutex);
741}
742
743/*
744 * called under osdc->request_mutex
745 */
746static void __unregister_request(struct ceph_osd_client *osdc,
747 struct ceph_osd_request *req)
748{
749 dout("__unregister_request %p tid %lld\n", req, req->r_tid);
750 rb_erase(&req->r_node, &osdc->requests);
751 osdc->num_requests--;
752
753 if (req->r_osd) {
754 /* make sure the original request isn't in flight. */
755 ceph_con_revoke(&req->r_osd->o_con, req->r_request);
756
757 list_del_init(&req->r_osd_item);
758 if (list_empty(&req->r_osd->o_requests))
759 __move_osd_to_lru(osdc, req->r_osd);
760 req->r_osd = NULL;
761 }
762
763 ceph_osdc_put_request(req);
764
765 list_del_init(&req->r_req_lru_item);
766 if (osdc->num_requests == 0) {
767 dout(" no requests, canceling timeout\n");
768 __cancel_osd_timeout(osdc);
769 }
770}
771
772/*
773 * Cancel a previously queued request message
774 */
775static void __cancel_request(struct ceph_osd_request *req)
776{
777 if (req->r_sent && req->r_osd) {
778 ceph_con_revoke(&req->r_osd->o_con, req->r_request);
779 req->r_sent = 0;
780 }
781 list_del_init(&req->r_req_lru_item);
782}
783
784/*
785 * Pick an osd (the first 'up' osd in the pg), allocate the osd struct
786 * (as needed), and set the request r_osd appropriately. If there is
787 * no up osd, set r_osd to NULL.
788 *
789 * Return 0 if unchanged, 1 if changed, or negative on error.
790 *
791 * Caller should hold map_sem for read and request_mutex.
792 */
793static int __map_osds(struct ceph_osd_client *osdc,
794 struct ceph_osd_request *req)
795{
796 struct ceph_osd_request_head *reqhead = req->r_request->front.iov_base;
797 struct ceph_pg pgid;
798 int acting[CEPH_PG_MAX_SIZE];
799 int o = -1, num = 0;
800 int err;
801
802 dout("map_osds %p tid %lld\n", req, req->r_tid);
803 err = ceph_calc_object_layout(&reqhead->layout, req->r_oid,
804 &req->r_file_layout, osdc->osdmap);
805 if (err)
806 return err;
807 pgid = reqhead->layout.ol_pgid;
808 req->r_pgid = pgid;
809
810 err = ceph_calc_pg_acting(osdc->osdmap, pgid, acting);
811 if (err > 0) {
812 o = acting[0];
813 num = err;
814 }
815
816 if ((req->r_osd && req->r_osd->o_osd == o &&
817 req->r_sent >= req->r_osd->o_incarnation &&
818 req->r_num_pg_osds == num &&
819 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) ||
820 (req->r_osd == NULL && o == -1))
821 return 0; /* no change */
822
823 dout("map_osds tid %llu pgid %d.%x osd%d (was osd%d)\n",
824 req->r_tid, le32_to_cpu(pgid.pool), le16_to_cpu(pgid.ps), o,
825 req->r_osd ? req->r_osd->o_osd : -1);
826
827 /* record full pg acting set */
828 memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num);
829 req->r_num_pg_osds = num;
830
831 if (req->r_osd) {
832 __cancel_request(req);
833 list_del_init(&req->r_osd_item);
834 req->r_osd = NULL;
835 }
836
837 req->r_osd = __lookup_osd(osdc, o);
838 if (!req->r_osd && o >= 0) {
839 err = -ENOMEM;
840 req->r_osd = create_osd(osdc);
841 if (!req->r_osd)
842 goto out;
843
844 dout("map_osds osd %p is osd%d\n", req->r_osd, o);
845 req->r_osd->o_osd = o;
846 req->r_osd->o_con.peer_name.num = cpu_to_le64(o);
847 __insert_osd(osdc, req->r_osd);
848
849 ceph_con_open(&req->r_osd->o_con, &osdc->osdmap->osd_addr[o]);
850 }
851
852 if (req->r_osd) {
853 __remove_osd_from_lru(req->r_osd);
854 list_add(&req->r_osd_item, &req->r_osd->o_requests);
855 }
856 err = 1; /* osd or pg changed */
857
858out:
859 return err;
860}
861
862/*
863 * caller should hold map_sem (for read) and request_mutex
864 */
865static int __send_request(struct ceph_osd_client *osdc,
866 struct ceph_osd_request *req)
867{
868 struct ceph_osd_request_head *reqhead;
869 int err;
870
871 err = __map_osds(osdc, req);
872 if (err < 0)
873 return err;
874 if (req->r_osd == NULL) {
875 dout("send_request %p no up osds in pg\n", req);
876 ceph_monc_request_next_osdmap(&osdc->client->monc);
877 return 0;
878 }
879
880 dout("send_request %p tid %llu to osd%d flags %d\n",
881 req, req->r_tid, req->r_osd->o_osd, req->r_flags);
882
883 reqhead = req->r_request->front.iov_base;
884 reqhead->osdmap_epoch = cpu_to_le32(osdc->osdmap->epoch);
885 reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */
886 reqhead->reassert_version = req->r_reassert_version;
887
888 req->r_stamp = jiffies;
889 list_move_tail(&req->r_req_lru_item, &osdc->req_lru);
890
891 ceph_msg_get(req->r_request); /* send consumes a ref */
892 ceph_con_send(&req->r_osd->o_con, req->r_request);
893 req->r_sent = req->r_osd->o_incarnation;
894 return 0;
895}
896
897/*
898 * Timeout callback, called every N seconds when one or more osd
899 * requests have been active for more than N seconds. When this
900 * happens, we ping all OSDs whose requests have timed out to
901 * ensure any communications channel reset is detected. Reset the
902 * request timeouts another N seconds in the future as we go.
903 * Reschedule the timeout event another N seconds in the future (unless
904 * there are no open requests).
905 */
906static void handle_timeout(struct work_struct *work)
907{
908 struct ceph_osd_client *osdc =
909 container_of(work, struct ceph_osd_client, timeout_work.work);
910 struct ceph_osd_request *req, *last_req = NULL;
911 struct ceph_osd *osd;
912 unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
913 unsigned long keepalive =
914 osdc->client->mount_args->osd_keepalive_timeout * HZ;
915 unsigned long last_stamp = 0;
916 struct rb_node *p;
917 struct list_head slow_osds;
918
919 dout("timeout\n");
920 down_read(&osdc->map_sem);
921
922 ceph_monc_request_next_osdmap(&osdc->client->monc);
923
924 mutex_lock(&osdc->request_mutex);
925 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
926 req = rb_entry(p, struct ceph_osd_request, r_node);
927
928 if (req->r_resend) {
929 int err;
930
931 dout("osdc resending prev failed %lld\n", req->r_tid);
932 err = __send_request(osdc, req);
933 if (err)
934 dout("osdc failed again on %lld\n", req->r_tid);
935 else
936 req->r_resend = false;
937 continue;
938 }
939 }
940
941 /*
942 * reset osds that appear to be _really_ unresponsive. this
943	 * is a failsafe measure; we really shouldn't be getting to
944 * this point if the system is working properly. the monitors
945 * should mark the osd as failed and we should find out about
946 * it from an updated osd map.
947 */
948 while (timeout && !list_empty(&osdc->req_lru)) {
949 req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
950 r_req_lru_item);
951
952 if (time_before(jiffies, req->r_stamp + timeout))
953 break;
954
955 BUG_ON(req == last_req && req->r_stamp == last_stamp);
956 last_req = req;
957 last_stamp = req->r_stamp;
958
959 osd = req->r_osd;
960 BUG_ON(!osd);
961 pr_warning(" tid %llu timed out on osd%d, will reset osd\n",
962 req->r_tid, osd->o_osd);
963 __kick_requests(osdc, osd);
964 }
965
966 /*
967 * ping osds that are a bit slow. this ensures that if there
968 * is a break in the TCP connection we will notice, and reopen
969 * a connection with that osd (from the fault callback).
970 */
971 INIT_LIST_HEAD(&slow_osds);
972 list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
973 if (time_before(jiffies, req->r_stamp + keepalive))
974 break;
975
976 osd = req->r_osd;
977 BUG_ON(!osd);
978 dout(" tid %llu is slow, will send keepalive on osd%d\n",
979 req->r_tid, osd->o_osd);
980 list_move_tail(&osd->o_keepalive_item, &slow_osds);
981 }
982 while (!list_empty(&slow_osds)) {
983 osd = list_entry(slow_osds.next, struct ceph_osd,
984 o_keepalive_item);
985 list_del_init(&osd->o_keepalive_item);
986 ceph_con_keepalive(&osd->o_con);
987 }
988
989 __schedule_osd_timeout(osdc);
990 mutex_unlock(&osdc->request_mutex);
991
992 up_read(&osdc->map_sem);
993}
994
995static void handle_osds_timeout(struct work_struct *work)
996{
997 struct ceph_osd_client *osdc =
998 container_of(work, struct ceph_osd_client,
999 osds_timeout_work.work);
1000 unsigned long delay =
1001 osdc->client->mount_args->osd_idle_ttl * HZ >> 2;
1002
1003 dout("osds timeout\n");
1004 down_read(&osdc->map_sem);
1005 remove_old_osds(osdc, 0);
1006 up_read(&osdc->map_sem);
1007
1008 schedule_delayed_work(&osdc->osds_timeout_work,
1009 round_jiffies_relative(delay));
1010}
1011
1012/*
1013 * handle osd op reply. either call the callback if it is specified,
1014 * or do the completion to wake up the waiting thread.
1015 */
1016static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1017 struct ceph_connection *con)
1018{
1019 struct ceph_osd_reply_head *rhead = msg->front.iov_base;
1020 struct ceph_osd_request *req;
1021 u64 tid;
1022 int numops, object_len, flags;
1023 s32 result;
1024
1025 tid = le64_to_cpu(msg->hdr.tid);
1026 if (msg->front.iov_len < sizeof(*rhead))
1027 goto bad;
1028 numops = le32_to_cpu(rhead->num_ops);
1029 object_len = le32_to_cpu(rhead->object_len);
1030 result = le32_to_cpu(rhead->result);
1031 if (msg->front.iov_len != sizeof(*rhead) + object_len +
1032 numops * sizeof(struct ceph_osd_op))
1033 goto bad;
1034 dout("handle_reply %p tid %llu result %d\n", msg, tid, (int)result);
1035
1036 /* lookup */
1037 mutex_lock(&osdc->request_mutex);
1038 req = __lookup_request(osdc, tid);
1039 if (req == NULL) {
1040 dout("handle_reply tid %llu dne\n", tid);
1041 mutex_unlock(&osdc->request_mutex);
1042 return;
1043 }
1044 ceph_osdc_get_request(req);
1045 flags = le32_to_cpu(rhead->flags);
1046
1047 /*
1048 * if this connection filled our message, drop our reference now, to
1049 * avoid a (safe but slower) revoke later.
1050 */
1051 if (req->r_con_filling_msg == con && req->r_reply == msg) {
1052 dout(" dropping con_filling_msg ref %p\n", con);
1053 req->r_con_filling_msg = NULL;
1054 ceph_con_put(con);
1055 }
1056
1057 if (!req->r_got_reply) {
1058 unsigned bytes;
1059
1060 req->r_result = le32_to_cpu(rhead->result);
1061 bytes = le32_to_cpu(msg->hdr.data_len);
1062 dout("handle_reply result %d bytes %d\n", req->r_result,
1063 bytes);
1064 if (req->r_result == 0)
1065 req->r_result = bytes;
1066
1067 /* in case this is a write and we need to replay, */
1068 req->r_reassert_version = rhead->reassert_version;
1069
1070 req->r_got_reply = 1;
1071 } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
1072 dout("handle_reply tid %llu dup ack\n", tid);
1073 mutex_unlock(&osdc->request_mutex);
1074 goto done;
1075 }
1076
1077 dout("handle_reply tid %llu flags %d\n", tid, flags);
1078
1079 /* either this is a read, or we got the safe response */
1080 if (result < 0 ||
1081 (flags & CEPH_OSD_FLAG_ONDISK) ||
1082 ((flags & CEPH_OSD_FLAG_WRITE) == 0))
1083 __unregister_request(osdc, req);
1084
1085 mutex_unlock(&osdc->request_mutex);
1086
1087 if (req->r_callback)
1088 req->r_callback(req, msg);
1089 else
1090 complete_all(&req->r_completion);
1091
1092 if (flags & CEPH_OSD_FLAG_ONDISK) {
1093 if (req->r_safe_callback)
1094 req->r_safe_callback(req, msg);
1095 complete_all(&req->r_safe_completion); /* fsync waiter */
1096 }
1097
1098done:
1099 ceph_osdc_put_request(req);
1100 return;
1101
1102bad:
1103 pr_err("corrupt osd_op_reply got %d %d expected %d\n",
1104 (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len),
1105 (int)sizeof(*rhead));
1106 ceph_msg_dump(msg);
1107}
1108
1109
1110static int __kick_requests(struct ceph_osd_client *osdc,
1111 struct ceph_osd *kickosd)
1112{
1113 struct ceph_osd_request *req;
1114 struct rb_node *p, *n;
1115 int needmap = 0;
1116 int err;
1117
1118 dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
1119 if (kickosd) {
1120 err = __reset_osd(osdc, kickosd);
1121 if (err == -EAGAIN)
1122 return 1;
1123 } else {
1124 for (p = rb_first(&osdc->osds); p; p = n) {
1125 struct ceph_osd *osd =
1126 rb_entry(p, struct ceph_osd, o_node);
1127
1128 n = rb_next(p);
1129 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
1130 memcmp(&osd->o_con.peer_addr,
1131 ceph_osd_addr(osdc->osdmap,
1132 osd->o_osd),
1133 sizeof(struct ceph_entity_addr)) != 0)
1134 __reset_osd(osdc, osd);
1135 }
1136 }
1137
1138 for (p = rb_first(&osdc->requests); p; p = rb_next(p)) {
1139 req = rb_entry(p, struct ceph_osd_request, r_node);
1140
1141 if (req->r_resend) {
1142 dout(" r_resend set on tid %llu\n", req->r_tid);
1143 __cancel_request(req);
1144 goto kick;
1145 }
1146 if (req->r_osd && kickosd == req->r_osd) {
1147 __cancel_request(req);
1148 goto kick;
1149 }
1150
1151 err = __map_osds(osdc, req);
1152 if (err == 0)
1153 continue; /* no change */
1154 if (err < 0) {
1155 /*
1156 * FIXME: really, we should set the request
1157 * error and fail if this isn't a 'nofail'
1158 * request, but that's a fair bit more
1159 * complicated to do. So retry!
1160 */
1161 dout(" setting r_resend on %llu\n", req->r_tid);
1162 req->r_resend = true;
1163 continue;
1164 }
1165 if (req->r_osd == NULL) {
1166 dout("tid %llu maps to no valid osd\n", req->r_tid);
1167 needmap++; /* request a newer map */
1168 continue;
1169 }
1170
1171kick:
1172 dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
1173 req->r_osd ? req->r_osd->o_osd : -1);
1174 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1175 err = __send_request(osdc, req);
1176 if (err) {
1177 dout(" setting r_resend on %llu\n", req->r_tid);
1178 req->r_resend = true;
1179 }
1180 }
1181
1182 return needmap;
1183}
1184
1185/*
1186 * Resubmit osd requests whose osd or osd address has changed. Request
1187 * a new osd map if osds are down, or we are otherwise unable to determine
1188 * how to direct a request.
1189 *
1190 * Close connections to down osds.
1191 *
1192 * If @who is specified, resubmit requests for that specific osd.
1193 *
1194 * Caller should hold map_sem for read and request_mutex.
1195 */
1196static void kick_requests(struct ceph_osd_client *osdc,
1197 struct ceph_osd *kickosd)
1198{
1199 int needmap;
1200
1201 mutex_lock(&osdc->request_mutex);
1202 needmap = __kick_requests(osdc, kickosd);
1203 mutex_unlock(&osdc->request_mutex);
1204
1205 if (needmap) {
1206 dout("%d requests for down osds, need new map\n", needmap);
1207 ceph_monc_request_next_osdmap(&osdc->client->monc);
1208 }
1209	}
1210
1211/*
1212 * Process updated osd map.
1213 *
1214 * The message contains any number of incremental and full maps, normally
1215 * indicating some sort of topology change in the cluster. Kick requests
1216 * off to different OSDs as needed.
1217 */
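/*
 * Front payload layout, as assumed by the decode below:
 *
 *	struct ceph_fsid fsid;
 *	u32 nr_inc_maps;
 *	nr_inc_maps  * { u32 epoch; u32 len; u8 data[len]; }
 *	u32 nr_full_maps;
 *	nr_full_maps * { u32 epoch; u32 len; u8 data[len]; }
 */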
1218void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
1219{
1220 void *p, *end, *next;
1221 u32 nr_maps, maplen;
1222 u32 epoch;
1223 struct ceph_osdmap *newmap = NULL, *oldmap;
1224 int err;
1225 struct ceph_fsid fsid;
1226
1227 dout("handle_map have %u\n", osdc->osdmap ? osdc->osdmap->epoch : 0);
1228 p = msg->front.iov_base;
1229 end = p + msg->front.iov_len;
1230
1231 /* verify fsid */
1232 ceph_decode_need(&p, end, sizeof(fsid), bad);
1233 ceph_decode_copy(&p, &fsid, sizeof(fsid));
1234 if (ceph_check_fsid(osdc->client, &fsid) < 0)
1235 return;
1236
1237 down_write(&osdc->map_sem);
1238
1239 /* incremental maps */
1240 ceph_decode_32_safe(&p, end, nr_maps, bad);
1241 dout(" %d inc maps\n", nr_maps);
1242 while (nr_maps > 0) {
1243 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
1244 epoch = ceph_decode_32(&p);
1245 maplen = ceph_decode_32(&p);
1246 ceph_decode_need(&p, end, maplen, bad);
1247 next = p + maplen;
1248 if (osdc->osdmap && osdc->osdmap->epoch+1 == epoch) {
1249 dout("applying incremental map %u len %d\n",
1250 epoch, maplen);
1251 newmap = osdmap_apply_incremental(&p, next,
1252 osdc->osdmap,
1253 osdc->client->msgr);
1254 if (IS_ERR(newmap)) {
1255 err = PTR_ERR(newmap);
1256 goto bad;
1257 }
1258 BUG_ON(!newmap);
1259 if (newmap != osdc->osdmap) {
1260 ceph_osdmap_destroy(osdc->osdmap);
1261 osdc->osdmap = newmap;
1262 }
1263 } else {
1264 dout("ignoring incremental map %u len %d\n",
1265 epoch, maplen);
1266 }
1267 p = next;
1268 nr_maps--;
1269 }
1270 if (newmap)
1271 goto done;
1272
1273 /* full maps */
1274 ceph_decode_32_safe(&p, end, nr_maps, bad);
1275 dout(" %d full maps\n", nr_maps);
1276 while (nr_maps) {
1277 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
1278 epoch = ceph_decode_32(&p);
1279 maplen = ceph_decode_32(&p);
1280 ceph_decode_need(&p, end, maplen, bad);
1281 if (nr_maps > 1) {
1282 dout("skipping non-latest full map %u len %d\n",
1283 epoch, maplen);
1284 } else if (osdc->osdmap && osdc->osdmap->epoch >= epoch) {
1285 dout("skipping full map %u len %d, "
1286 "older than our %u\n", epoch, maplen,
1287 osdc->osdmap->epoch);
1288 } else {
1289 dout("taking full map %u len %d\n", epoch, maplen);
1290 newmap = osdmap_decode(&p, p+maplen);
1291 if (IS_ERR(newmap)) {
1292 err = PTR_ERR(newmap);
1293 goto bad;
1294 }
1295 BUG_ON(!newmap);
1296 oldmap = osdc->osdmap;
1297 osdc->osdmap = newmap;
1298 if (oldmap)
1299 ceph_osdmap_destroy(oldmap);
1300 }
1301 p += maplen;
1302 nr_maps--;
1303 }
1304
1305done:
1306 downgrade_write(&osdc->map_sem);
1307 ceph_monc_got_osdmap(&osdc->client->monc, osdc->osdmap->epoch);
1308 if (newmap)
1309 kick_requests(osdc, NULL);
1310 up_read(&osdc->map_sem);
1311 wake_up_all(&osdc->client->auth_wq);
1312 return;
1313
1314bad:
1315 pr_err("osdc handle_map corrupt msg\n");
1316 ceph_msg_dump(msg);
1317 up_write(&osdc->map_sem);
1318 return;
1319}
1320
1321/*
1322 * Register request, send initial attempt.
1323 */
1324int ceph_osdc_start_request(struct ceph_osd_client *osdc,
1325 struct ceph_osd_request *req,
1326 bool nofail)
1327{
1328 int rc = 0;
1329
1330 req->r_request->pages = req->r_pages;
1331 req->r_request->nr_pages = req->r_num_pages;
1332#ifdef CONFIG_BLOCK
1333 req->r_request->bio = req->r_bio;
1334#endif
1335 req->r_request->trail = req->r_trail;
1336
1337 register_request(osdc, req);
1338
1339 down_read(&osdc->map_sem);
1340 mutex_lock(&osdc->request_mutex);
1341 /*
1342 * a racing kick_requests() may have sent the message for us
1343 * while we dropped request_mutex above, so only send now if
1344	 * the request still hasn't been touched yet.
1345 */
1346 if (req->r_sent == 0) {
1347 rc = __send_request(osdc, req);
1348 if (rc) {
1349 if (nofail) {
1350				dout("osdc_start_request failed send, "
1351				     "marking %lld\n", req->r_tid);
1352 req->r_resend = true;
1353 rc = 0;
1354 } else {
1355 __unregister_request(osdc, req);
1356 }
1357 }
1358 }
1359 mutex_unlock(&osdc->request_mutex);
1360 up_read(&osdc->map_sem);
1361 return rc;
1362}
1363
1364/*
1365 * wait for a request to complete
1366 */
1367int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
1368 struct ceph_osd_request *req)
1369{
1370 int rc;
1371
1372 rc = wait_for_completion_interruptible(&req->r_completion);
1373 if (rc < 0) {
1374 mutex_lock(&osdc->request_mutex);
1375 __cancel_request(req);
1376 __unregister_request(osdc, req);
1377 mutex_unlock(&osdc->request_mutex);
1378 dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
1379 return rc;
1380 }
1381
1382 dout("wait_request tid %llu result %d\n", req->r_tid, req->r_result);
1383 return req->r_result;
1384}
1385
1386/*
1387 * sync - wait for all in-flight write requests to flush. we snapshot
 *        last_tid first so newly submitted requests cannot starve us.
1388 */
1389void ceph_osdc_sync(struct ceph_osd_client *osdc)
1390{
1391 struct ceph_osd_request *req;
1392 u64 last_tid, next_tid = 0;
1393
1394 mutex_lock(&osdc->request_mutex);
1395 last_tid = osdc->last_tid;
1396 while (1) {
1397 req = __lookup_request_ge(osdc, next_tid);
1398 if (!req)
1399 break;
1400 if (req->r_tid > last_tid)
1401 break;
1402
1403 next_tid = req->r_tid + 1;
1404 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0)
1405 continue;
1406
1407 ceph_osdc_get_request(req);
1408 mutex_unlock(&osdc->request_mutex);
1409 dout("sync waiting on tid %llu (last is %llu)\n",
1410 req->r_tid, last_tid);
1411 wait_for_completion(&req->r_safe_completion);
1412 mutex_lock(&osdc->request_mutex);
1413 ceph_osdc_put_request(req);
1414 }
1415 mutex_unlock(&osdc->request_mutex);
1416 dout("sync done (thru tid %llu)\n", last_tid);
1417}
1418
1419/*
1420 * init, shutdown
1421 */
1422int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
1423{
1424 int err;
1425
1426 dout("init\n");
1427 osdc->client = client;
1428 osdc->osdmap = NULL;
1429 init_rwsem(&osdc->map_sem);
1430 init_completion(&osdc->map_waiters);
1431 osdc->last_requested_map = 0;
1432 mutex_init(&osdc->request_mutex);
1433 osdc->last_tid = 0;
1434 osdc->osds = RB_ROOT;
1435 INIT_LIST_HEAD(&osdc->osd_lru);
1436 osdc->requests = RB_ROOT;
1437 INIT_LIST_HEAD(&osdc->req_lru);
1438 osdc->num_requests = 0;
1439 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
1440 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
1441
1442 schedule_delayed_work(&osdc->osds_timeout_work,
1443 round_jiffies_relative(osdc->client->mount_args->osd_idle_ttl * HZ));
1444
1445 err = -ENOMEM;
1446 osdc->req_mempool = mempool_create_kmalloc_pool(10,
1447 sizeof(struct ceph_osd_request));
1448 if (!osdc->req_mempool)
1449 goto out;
1450
1451 err = ceph_msgpool_init(&osdc->msgpool_op, OSD_OP_FRONT_LEN, 10, true,
1452 "osd_op");
1453 if (err < 0)
1454 goto out_mempool;
1455 err = ceph_msgpool_init(&osdc->msgpool_op_reply,
1456 OSD_OPREPLY_FRONT_LEN, 10, true,
1457 "osd_op_reply");
1458 if (err < 0)
1459 goto out_msgpool;
1460 return 0;
1461
1462out_msgpool:
1463 ceph_msgpool_destroy(&osdc->msgpool_op);
1464out_mempool:
1465 mempool_destroy(osdc->req_mempool);
1466out:
1467 return err;
1468}
1469
1470void ceph_osdc_stop(struct ceph_osd_client *osdc)
1471{
1472 cancel_delayed_work_sync(&osdc->timeout_work);
1473 cancel_delayed_work_sync(&osdc->osds_timeout_work);
1474 if (osdc->osdmap) {
1475 ceph_osdmap_destroy(osdc->osdmap);
1476 osdc->osdmap = NULL;
1477 }
1478 remove_old_osds(osdc, 1);
1479 mempool_destroy(osdc->req_mempool);
1480 ceph_msgpool_destroy(&osdc->msgpool_op);
1481 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
1482}
1483
1484/*
1485 * Read some contiguous pages. If we cross a stripe boundary, shorten
1486 * *plen. Return number of bytes read, or error.
1487 */
1488int ceph_osdc_readpages(struct ceph_osd_client *osdc,
1489 struct ceph_vino vino, struct ceph_file_layout *layout,
1490 u64 off, u64 *plen,
1491 u32 truncate_seq, u64 truncate_size,
1492 struct page **pages, int num_pages)
1493{
1494 struct ceph_osd_request *req;
1495 int rc = 0;
1496
1497 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
1498 vino.snap, off, *plen);
1499 req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
1500 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
1501 NULL, 0, truncate_seq, truncate_size, NULL,
1502 false, 1);
1503 if (!req)
1504 return -ENOMEM;
1505
1506 /* it may be a short read due to an object boundary */
1507 req->r_pages = pages;
1508
1509 dout("readpages final extent is %llu~%llu (%d pages)\n",
1510 off, *plen, req->r_num_pages);
1511
1512 rc = ceph_osdc_start_request(osdc, req, false);
1513 if (!rc)
1514 rc = ceph_osdc_wait_request(osdc, req);
1515
1516 ceph_osdc_put_request(req);
1517 dout("readpages result %d\n", rc);
1518 return rc;
1519}
1520
1521/*
1522 * do a synchronous write on N pages
1523 */
1524int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
1525 struct ceph_file_layout *layout,
1526 struct ceph_snap_context *snapc,
1527 u64 off, u64 len,
1528 u32 truncate_seq, u64 truncate_size,
1529 struct timespec *mtime,
1530 struct page **pages, int num_pages,
1531 int flags, int do_sync, bool nofail)
1532{
1533 struct ceph_osd_request *req;
1534 int rc = 0;
1535
1536 BUG_ON(vino.snap != CEPH_NOSNAP);
1537 req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
1538 CEPH_OSD_OP_WRITE,
1539 flags | CEPH_OSD_FLAG_ONDISK |
1540 CEPH_OSD_FLAG_WRITE,
1541 snapc, do_sync,
1542 truncate_seq, truncate_size, mtime,
1543 nofail, 1);
1544 if (!req)
1545 return -ENOMEM;
1546
1547 /* it may be a short write due to an object boundary */
1548 req->r_pages = pages;
1549 dout("writepages %llu~%llu (%d pages)\n", off, len,
1550 req->r_num_pages);
1551
1552 rc = ceph_osdc_start_request(osdc, req, nofail);
1553 if (!rc)
1554 rc = ceph_osdc_wait_request(osdc, req);
1555
1556 ceph_osdc_put_request(req);
1557 if (rc == 0)
1558 rc = len;
1559 dout("writepages result %d\n", rc);
1560 return rc;
1561}
1562
1563/*
1564 * handle incoming message
1565 */
1566static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
1567{
1568 struct ceph_osd *osd = con->private;
1569 struct ceph_osd_client *osdc;
1570 int type = le16_to_cpu(msg->hdr.type);
1571
1572 if (!osd)
1573 goto out;
1574 osdc = osd->o_osdc;
1575
1576 switch (type) {
1577 case CEPH_MSG_OSD_MAP:
1578 ceph_osdc_handle_map(osdc, msg);
1579 break;
1580 case CEPH_MSG_OSD_OPREPLY:
1581 handle_reply(osdc, msg, con);
1582 break;
1583
1584 default:
1585 pr_err("received unknown message type %d %s\n", type,
1586 ceph_msg_type_name(type));
1587 }
1588out:
1589 ceph_msg_put(msg);
1590}
1591
1592/*
1593 * lookup and return message for incoming reply. set up reply message
1594 * pages.
1595 */
1596static struct ceph_msg *get_reply(struct ceph_connection *con,
1597 struct ceph_msg_header *hdr,
1598 int *skip)
1599{
1600 struct ceph_osd *osd = con->private;
1601 struct ceph_osd_client *osdc = osd->o_osdc;
1602 struct ceph_msg *m;
1603 struct ceph_osd_request *req;
1604 int front = le32_to_cpu(hdr->front_len);
1605 int data_len = le32_to_cpu(hdr->data_len);
1606 u64 tid;
1607
1608 tid = le64_to_cpu(hdr->tid);
1609 mutex_lock(&osdc->request_mutex);
1610 req = __lookup_request(osdc, tid);
1611 if (!req) {
1612 *skip = 1;
1613 m = NULL;
1614 pr_info("get_reply unknown tid %llu from osd%d\n", tid,
1615 osd->o_osd);
1616 goto out;
1617 }
1618
1619 if (req->r_con_filling_msg) {
1620 dout("get_reply revoking msg %p from old con %p\n",
1621 req->r_reply, req->r_con_filling_msg);
1622 ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
1623 ceph_con_put(req->r_con_filling_msg);
1624 req->r_con_filling_msg = NULL;
1625 }
1626
1627 if (front > req->r_reply->front.iov_len) {
1628 pr_warning("get_reply front %d > preallocated %d\n",
1629 front, (int)req->r_reply->front.iov_len);
1630 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front, GFP_NOFS);
1631 if (!m)
1632 goto out;
1633 ceph_msg_put(req->r_reply);
1634 req->r_reply = m;
1635 }
1636 m = ceph_msg_get(req->r_reply);
1637
1638 if (data_len > 0) {
1639 unsigned data_off = le16_to_cpu(hdr->data_off);
1640 int want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
1641
1642 if (unlikely(req->r_num_pages < want)) {
1643 pr_warning("tid %lld reply %d > expected %d pages\n",
1644			       tid, want, req->r_num_pages);
1645 *skip = 1;
1646 ceph_msg_put(m);
1647 m = NULL;
1648 goto out;
1649 }
1650 m->pages = req->r_pages;
1651 m->nr_pages = req->r_num_pages;
1652#ifdef CONFIG_BLOCK
1653 m->bio = req->r_bio;
1654#endif
1655 }
1656 *skip = 0;
1657 req->r_con_filling_msg = ceph_con_get(con);
1658 dout("get_reply tid %lld %p\n", tid, m);
1659
1660out:
1661 mutex_unlock(&osdc->request_mutex);
1662 return m;
1664}
1665
1666static struct ceph_msg *alloc_msg(struct ceph_connection *con,
1667 struct ceph_msg_header *hdr,
1668 int *skip)
1669{
1670 struct ceph_osd *osd = con->private;
1671 int type = le16_to_cpu(hdr->type);
1672 int front = le32_to_cpu(hdr->front_len);
1673
1674 switch (type) {
1675 case CEPH_MSG_OSD_MAP:
1676 return ceph_msg_new(type, front, GFP_NOFS);
1677 case CEPH_MSG_OSD_OPREPLY:
1678 return get_reply(con, hdr, skip);
1679 default:
1680 pr_info("alloc_msg unexpected msg type %d from osd%d\n", type,
1681 osd->o_osd);
1682 *skip = 1;
1683 return NULL;
1684 }
1685}
1686
1687/*
1688 * Wrappers to refcount containing ceph_osd struct
1689 */
1690static struct ceph_connection *get_osd_con(struct ceph_connection *con)
1691{
1692 struct ceph_osd *osd = con->private;
1693 if (get_osd(osd))
1694 return con;
1695 return NULL;
1696}
1697
1698static void put_osd_con(struct ceph_connection *con)
1699{
1700 struct ceph_osd *osd = con->private;
1701 put_osd(osd);
1702}
1703
1704/*
1705 * authentication
1706 */
1707static int get_authorizer(struct ceph_connection *con,
1708 void **buf, int *len, int *proto,
1709 void **reply_buf, int *reply_len, int force_new)
1710{
1711 struct ceph_osd *o = con->private;
1712 struct ceph_osd_client *osdc = o->o_osdc;
1713 struct ceph_auth_client *ac = osdc->client->monc.auth;
1714 int ret = 0;
1715
1716 if (force_new && o->o_authorizer) {
1717 ac->ops->destroy_authorizer(ac, o->o_authorizer);
1718 o->o_authorizer = NULL;
1719 }
1720 if (o->o_authorizer == NULL) {
1721 ret = ac->ops->create_authorizer(
1722 ac, CEPH_ENTITY_TYPE_OSD,
1723 &o->o_authorizer,
1724 &o->o_authorizer_buf,
1725 &o->o_authorizer_buf_len,
1726 &o->o_authorizer_reply_buf,
1727 &o->o_authorizer_reply_buf_len);
1728 if (ret)
1729 return ret;
1730 }
1731
1732 *proto = ac->protocol;
1733 *buf = o->o_authorizer_buf;
1734 *len = o->o_authorizer_buf_len;
1735 *reply_buf = o->o_authorizer_reply_buf;
1736 *reply_len = o->o_authorizer_reply_buf_len;
1737 return 0;
1738}
1739
1740
1741static int verify_authorizer_reply(struct ceph_connection *con, int len)
1742{
1743 struct ceph_osd *o = con->private;
1744 struct ceph_osd_client *osdc = o->o_osdc;
1745 struct ceph_auth_client *ac = osdc->client->monc.auth;
1746
1747 return ac->ops->verify_authorizer_reply(ac, o->o_authorizer, len);
1748}
1749
1750static int invalidate_authorizer(struct ceph_connection *con)
1751{
1752 struct ceph_osd *o = con->private;
1753 struct ceph_osd_client *osdc = o->o_osdc;
1754 struct ceph_auth_client *ac = osdc->client->monc.auth;
1755
1756 if (ac->ops->invalidate_authorizer)
1757 ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
1758
1759 return ceph_monc_validate_auth(&osdc->client->monc);
1760}
1761
1762static const struct ceph_connection_operations osd_con_ops = {
1763 .get = get_osd_con,
1764 .put = put_osd_con,
1765 .dispatch = dispatch,
1766 .get_authorizer = get_authorizer,
1767 .verify_authorizer_reply = verify_authorizer_reply,
1768 .invalidate_authorizer = invalidate_authorizer,
1769 .alloc_msg = alloc_msg,
1770 .fault = osd_reset,
1771};
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h
deleted file mode 100644
index 65c9c560f1ac..000000000000
--- a/fs/ceph/osd_client.h
+++ /dev/null
@@ -1,233 +0,0 @@
1#ifndef _FS_CEPH_OSD_CLIENT_H
2#define _FS_CEPH_OSD_CLIENT_H
3
4#include <linux/completion.h>
5#include <linux/kref.h>
6#include <linux/mempool.h>
7#include <linux/rbtree.h>
8
9#include "types.h"
10#include "osdmap.h"
11#include "messenger.h"
12
13struct ceph_msg;
14struct ceph_snap_context;
15struct ceph_osd_request;
16struct ceph_osd_client;
17struct ceph_authorizer;
18struct ceph_pagelist;
19
20/*
21 * completion callback for async writepages
22 */
23typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *,
24 struct ceph_msg *);
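
/*
 * A hypothetical callback sketch (the name is an assumption): assign it
 * to req->r_callback before ceph_osdc_start_request(); the reply handler
 * invokes it instead of completing req->r_completion.
 *
 *	static void my_writepages_done(struct ceph_osd_request *req,
 *				       struct ceph_msg *msg)
 *	{
 *		int rc = req->r_result;
 *		... release pages, propagate rc ...
 *	}
 */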
25
26/* a given osd we're communicating with */
27struct ceph_osd {
28 atomic_t o_ref;
29 struct ceph_osd_client *o_osdc;
30 int o_osd;
31 int o_incarnation;
32 struct rb_node o_node;
33 struct ceph_connection o_con;
34 struct list_head o_requests;
35 struct list_head o_osd_lru;
36 struct ceph_authorizer *o_authorizer;
37 void *o_authorizer_buf, *o_authorizer_reply_buf;
38 size_t o_authorizer_buf_len, o_authorizer_reply_buf_len;
39 unsigned long lru_ttl;
40 int o_marked_for_keepalive;
41 struct list_head o_keepalive_item;
42};
43
44/* an in-flight request */
45struct ceph_osd_request {
46 u64 r_tid; /* unique for this client */
47 struct rb_node r_node;
48 struct list_head r_req_lru_item;
49 struct list_head r_osd_item;
50 struct ceph_osd *r_osd;
51 struct ceph_pg r_pgid;
52 int r_pg_osds[CEPH_PG_MAX_SIZE];
53 int r_num_pg_osds;
54
55 struct ceph_connection *r_con_filling_msg;
56
57 struct ceph_msg *r_request, *r_reply;
58 int r_result;
59 int r_flags; /* any additional flags for the osd */
60 u32 r_sent; /* >0 if r_request is sending/sent */
61 int r_got_reply;
62
63 struct ceph_osd_client *r_osdc;
64 struct kref r_kref;
65 bool r_mempool;
66 struct completion r_completion, r_safe_completion;
67 ceph_osdc_callback_t r_callback, r_safe_callback;
68 struct ceph_eversion r_reassert_version;
69 struct list_head r_unsafe_item;
70
71 struct inode *r_inode; /* for use by callbacks */
72
73 char r_oid[40]; /* object name */
74 int r_oid_len;
75 unsigned long r_stamp; /* send OR check time */
76 bool r_resend; /* msg send failed, needs retry */
77
78 struct ceph_file_layout r_file_layout;
79 struct ceph_snap_context *r_snapc; /* snap context for writes */
80 unsigned r_num_pages; /* size of page array (follows) */
81 struct page **r_pages; /* pages for data payload */
82 int r_pages_from_pool;
83 int r_own_pages; /* if true, i own page list */
84#ifdef CONFIG_BLOCK
85 struct bio *r_bio; /* instead of pages */
86#endif
87
88 struct ceph_pagelist *r_trail; /* trailing part of the data */
89};
90
91struct ceph_osd_client {
92 struct ceph_client *client;
93
94 struct ceph_osdmap *osdmap; /* current map */
95 struct rw_semaphore map_sem;
96 struct completion map_waiters;
97 u64 last_requested_map;
98
99 struct mutex request_mutex;
100 struct rb_root osds; /* osds */
101 struct list_head osd_lru; /* idle osds */
102 u64 timeout_tid; /* tid of timeout triggering rq */
103 u64 last_tid; /* tid of last request */
104 struct rb_root requests; /* pending requests */
105 struct list_head req_lru; /* pending requests lru */
106 int num_requests;
107 struct delayed_work timeout_work;
108 struct delayed_work osds_timeout_work;
109#ifdef CONFIG_DEBUG_FS
110 struct dentry *debugfs_file;
111#endif
112
113 mempool_t *req_mempool;
114
115 struct ceph_msgpool msgpool_op;
116 struct ceph_msgpool msgpool_op_reply;
117};
118
119struct ceph_osd_req_op {
120 u16 op; /* CEPH_OSD_OP_* */
121 u32 flags; /* CEPH_OSD_FLAG_* */
122 union {
123 struct {
124 u64 offset, length;
125 u64 truncate_size;
126 u32 truncate_seq;
127 } extent;
128 struct {
129 const char *name;
130 u32 name_len;
131 const char *val;
132 u32 value_len;
133 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
134 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
135 } xattr;
136 struct {
137 const char *class_name;
138 __u8 class_len;
139 const char *method_name;
140 __u8 method_len;
141 __u8 argc;
142 const char *indata;
143 u32 indata_len;
144 } cls;
145 struct {
146 u64 cookie, count;
147 } pgls;
148 struct {
149 u64 snapid;
150 } snap;
151 };
152 u32 payload_len;
153};
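
/*
 * Example initialization (hypothetical values): a single 4096-byte read
 * at offset 0. Op arrays are terminated by a zeroed .op slot, which is
 * how ceph_osdc_build_request() finds the end:
 *
 *	struct ceph_osd_req_op ops[2] = {
 *		{ .op = CEPH_OSD_OP_READ,
 *		  .extent = { .offset = 0, .length = 4096 } },
 *		{ .op = 0 },
 *	};
 */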
154
155extern int ceph_osdc_init(struct ceph_osd_client *osdc,
156 struct ceph_client *client);
157extern void ceph_osdc_stop(struct ceph_osd_client *osdc);
158
159extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
160 struct ceph_msg *msg);
161extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
162 struct ceph_msg *msg);
163
164extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc,
165 struct ceph_file_layout *layout,
166 u64 snapid,
167 u64 off, u64 *plen, u64 *bno,
168 struct ceph_osd_request *req,
169 struct ceph_osd_req_op *op);
170
171extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
172 int flags,
173 struct ceph_snap_context *snapc,
174 struct ceph_osd_req_op *ops,
175 bool use_mempool,
176 gfp_t gfp_flags,
177 struct page **pages,
178 struct bio *bio);
179
180extern void ceph_osdc_build_request(struct ceph_osd_request *req,
181 u64 off, u64 *plen,
182 struct ceph_osd_req_op *src_ops,
183 struct ceph_snap_context *snapc,
184 struct timespec *mtime,
185 const char *oid,
186 int oid_len);
187
188extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
189 struct ceph_file_layout *layout,
190 struct ceph_vino vino,
191 u64 offset, u64 *len, int op, int flags,
192 struct ceph_snap_context *snapc,
193 int do_sync, u32 truncate_seq,
194 u64 truncate_size,
195 struct timespec *mtime,
196 bool use_mempool, int num_reply);
197
198static inline void ceph_osdc_get_request(struct ceph_osd_request *req)
199{
200 kref_get(&req->r_kref);
201}
202extern void ceph_osdc_release_request(struct kref *kref);
203static inline void ceph_osdc_put_request(struct ceph_osd_request *req)
204{
205 kref_put(&req->r_kref, ceph_osdc_release_request);
206}
207
208extern int ceph_osdc_start_request(struct ceph_osd_client *osdc,
209 struct ceph_osd_request *req,
210 bool nofail);
211extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
212 struct ceph_osd_request *req);
213extern void ceph_osdc_sync(struct ceph_osd_client *osdc);
214
215extern int ceph_osdc_readpages(struct ceph_osd_client *osdc,
216 struct ceph_vino vino,
217 struct ceph_file_layout *layout,
218 u64 off, u64 *plen,
219 u32 truncate_seq, u64 truncate_size,
220 struct page **pages, int nr_pages);
221
222extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
223 struct ceph_vino vino,
224 struct ceph_file_layout *layout,
225 struct ceph_snap_context *sc,
226 u64 off, u64 len,
227 u32 truncate_seq, u64 truncate_size,
228 struct timespec *mtime,
229 struct page **pages, int nr_pages,
230 int flags, int do_sync, bool nofail);
231
232#endif
233
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
deleted file mode 100644
index 3ccd11761cb6..000000000000
--- a/fs/ceph/osdmap.c
+++ /dev/null
@@ -1,1123 +0,0 @@
1
2#include "ceph_debug.h"
3
4#include <linux/slab.h>
5#include <asm/div64.h>
6
7#include "super.h"
8#include "osdmap.h"
9#include "crush/hash.h"
10#include "crush/mapper.h"
11#include "decode.h"
12
13char *ceph_osdmap_state_str(char *str, int len, int state)
14{
15 int flag = 0;
16
17 if (!len)
18 goto done;
19
20 *str = '\0';
21 if (state) {
22 if (state & CEPH_OSD_EXISTS) {
23 snprintf(str, len, "exists");
24 flag = 1;
25 }
26 if (state & CEPH_OSD_UP) {
27			int pos = strlen(str);
28			snprintf(str + pos, len - pos, "%s%s",
				 (flag ? ", " : ""), "up");
29 flag = 1;
30 }
31 } else {
32 snprintf(str, len, "doesn't exist");
33 }
34done:
35 return str;
36}
37
38/* maps */
39
40static int calc_bits_of(unsigned t)
41{
42 int b = 0;
43 while (t) {
44 t = t >> 1;
45 b++;
46 }
47 return b;
48}
49
50/*
51 * the foo_mask is the smallest value 2^n-1 that is >= foo-1.
52 */
53static void calc_pg_masks(struct ceph_pg_pool_info *pi)
54{
55 pi->pg_num_mask = (1 << calc_bits_of(le32_to_cpu(pi->v.pg_num)-1)) - 1;
56 pi->pgp_num_mask =
57 (1 << calc_bits_of(le32_to_cpu(pi->v.pgp_num)-1)) - 1;
58 pi->lpg_num_mask =
59 (1 << calc_bits_of(le32_to_cpu(pi->v.lpg_num)-1)) - 1;
60 pi->lpgp_num_mask =
61 (1 << calc_bits_of(le32_to_cpu(pi->v.lpgp_num)-1)) - 1;
62}
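
/*
 * Worked example: pg_num = 12 gives pg_num - 1 = 11 (binary 1011),
 * calc_bits_of(11) = 4, so pg_num_mask = (1 << 4) - 1 = 15. Every
 * pg_num in [9, 16] yields that same mask.
 */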
63
64/*
65 * decode crush map
66 */
67static int crush_decode_uniform_bucket(void **p, void *end,
68 struct crush_bucket_uniform *b)
69{
70 dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
71 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
72 b->item_weight = ceph_decode_32(p);
73 return 0;
74bad:
75 return -EINVAL;
76}
77
78static int crush_decode_list_bucket(void **p, void *end,
79 struct crush_bucket_list *b)
80{
81 int j;
82 dout("crush_decode_list_bucket %p to %p\n", *p, end);
83 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
84 if (b->item_weights == NULL)
85 return -ENOMEM;
86 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
87 if (b->sum_weights == NULL)
88 return -ENOMEM;
89 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
90 for (j = 0; j < b->h.size; j++) {
91 b->item_weights[j] = ceph_decode_32(p);
92 b->sum_weights[j] = ceph_decode_32(p);
93 }
94 return 0;
95bad:
96 return -EINVAL;
97}
98
99static int crush_decode_tree_bucket(void **p, void *end,
100 struct crush_bucket_tree *b)
101{
102 int j;
103 dout("crush_decode_tree_bucket %p to %p\n", *p, end);
104 ceph_decode_32_safe(p, end, b->num_nodes, bad);
105 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
106 if (b->node_weights == NULL)
107 return -ENOMEM;
108 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
109 for (j = 0; j < b->num_nodes; j++)
110 b->node_weights[j] = ceph_decode_32(p);
111 return 0;
112bad:
113 return -EINVAL;
114}
115
116static int crush_decode_straw_bucket(void **p, void *end,
117 struct crush_bucket_straw *b)
118{
119 int j;
120 dout("crush_decode_straw_bucket %p to %p\n", *p, end);
121 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
122 if (b->item_weights == NULL)
123 return -ENOMEM;
124 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
125 if (b->straws == NULL)
126 return -ENOMEM;
127 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
128 for (j = 0; j < b->h.size; j++) {
129 b->item_weights[j] = ceph_decode_32(p);
130 b->straws[j] = ceph_decode_32(p);
131 }
132 return 0;
133bad:
134 return -EINVAL;
135}
136
137static struct crush_map *crush_decode(void *pbyval, void *end)
138{
139 struct crush_map *c;
140 int err = -EINVAL;
141 int i, j;
142 void **p = &pbyval;
143 void *start = pbyval;
144 u32 magic;
145
146 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
147
148 c = kzalloc(sizeof(*c), GFP_NOFS);
149 if (c == NULL)
150 return ERR_PTR(-ENOMEM);
151
152 ceph_decode_need(p, end, 4*sizeof(u32), bad);
153 magic = ceph_decode_32(p);
154 if (magic != CRUSH_MAGIC) {
155 pr_err("crush_decode magic %x != current %x\n",
156 (unsigned)magic, (unsigned)CRUSH_MAGIC);
157 goto bad;
158 }
159 c->max_buckets = ceph_decode_32(p);
160 c->max_rules = ceph_decode_32(p);
161 c->max_devices = ceph_decode_32(p);
162
163 c->device_parents = kcalloc(c->max_devices, sizeof(u32), GFP_NOFS);
164 if (c->device_parents == NULL)
165 goto badmem;
166 c->bucket_parents = kcalloc(c->max_buckets, sizeof(u32), GFP_NOFS);
167 if (c->bucket_parents == NULL)
168 goto badmem;
169
170 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
171 if (c->buckets == NULL)
172 goto badmem;
173 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
174 if (c->rules == NULL)
175 goto badmem;
176
177 /* buckets */
178 for (i = 0; i < c->max_buckets; i++) {
179 int size = 0;
180 u32 alg;
181 struct crush_bucket *b;
182
183 ceph_decode_32_safe(p, end, alg, bad);
184 if (alg == 0) {
185 c->buckets[i] = NULL;
186 continue;
187 }
188 dout("crush_decode bucket %d off %x %p to %p\n",
189 i, (int)(*p-start), *p, end);
190
191 switch (alg) {
192 case CRUSH_BUCKET_UNIFORM:
193 size = sizeof(struct crush_bucket_uniform);
194 break;
195 case CRUSH_BUCKET_LIST:
196 size = sizeof(struct crush_bucket_list);
197 break;
198 case CRUSH_BUCKET_TREE:
199 size = sizeof(struct crush_bucket_tree);
200 break;
201 case CRUSH_BUCKET_STRAW:
202 size = sizeof(struct crush_bucket_straw);
203 break;
204 default:
205 err = -EINVAL;
206 goto bad;
207 }
208 BUG_ON(size == 0);
209 b = c->buckets[i] = kzalloc(size, GFP_NOFS);
210 if (b == NULL)
211 goto badmem;
212
213 ceph_decode_need(p, end, 4*sizeof(u32), bad);
214 b->id = ceph_decode_32(p);
215 b->type = ceph_decode_16(p);
216 b->alg = ceph_decode_8(p);
217 b->hash = ceph_decode_8(p);
218 b->weight = ceph_decode_32(p);
219 b->size = ceph_decode_32(p);
220
221 dout("crush_decode bucket size %d off %x %p to %p\n",
222 b->size, (int)(*p-start), *p, end);
223
224 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
225 if (b->items == NULL)
226 goto badmem;
227 b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
228 if (b->perm == NULL)
229 goto badmem;
230 b->perm_n = 0;
231
232 ceph_decode_need(p, end, b->size*sizeof(u32), bad);
233 for (j = 0; j < b->size; j++)
234 b->items[j] = ceph_decode_32(p);
235
236 switch (b->alg) {
237 case CRUSH_BUCKET_UNIFORM:
238 err = crush_decode_uniform_bucket(p, end,
239 (struct crush_bucket_uniform *)b);
240 if (err < 0)
241 goto bad;
242 break;
243 case CRUSH_BUCKET_LIST:
244 err = crush_decode_list_bucket(p, end,
245 (struct crush_bucket_list *)b);
246 if (err < 0)
247 goto bad;
248 break;
249 case CRUSH_BUCKET_TREE:
250 err = crush_decode_tree_bucket(p, end,
251 (struct crush_bucket_tree *)b);
252 if (err < 0)
253 goto bad;
254 break;
255 case CRUSH_BUCKET_STRAW:
256 err = crush_decode_straw_bucket(p, end,
257 (struct crush_bucket_straw *)b);
258 if (err < 0)
259 goto bad;
260 break;
261 }
262 }
263
264 /* rules */
265 dout("rule vec is %p\n", c->rules);
266 for (i = 0; i < c->max_rules; i++) {
267 u32 yes;
268 struct crush_rule *r;
269
270 ceph_decode_32_safe(p, end, yes, bad);
271 if (!yes) {
272 dout("crush_decode NO rule %d off %x %p to %p\n",
273 i, (int)(*p-start), *p, end);
274 c->rules[i] = NULL;
275 continue;
276 }
277
278 dout("crush_decode rule %d off %x %p to %p\n",
279 i, (int)(*p-start), *p, end);
280
281 /* len */
282 ceph_decode_32_safe(p, end, yes, bad);
283#if BITS_PER_LONG == 32
284 err = -EINVAL;
285 if (yes > ULONG_MAX / sizeof(struct crush_rule_step))
286 goto bad;
287#endif
288 r = c->rules[i] = kmalloc(sizeof(*r) +
289 yes*sizeof(struct crush_rule_step),
290 GFP_NOFS);
291 if (r == NULL)
292 goto badmem;
293 dout(" rule %d is at %p\n", i, r);
294 r->len = yes;
295 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
296 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
297 for (j = 0; j < r->len; j++) {
298 r->steps[j].op = ceph_decode_32(p);
299 r->steps[j].arg1 = ceph_decode_32(p);
300 r->steps[j].arg2 = ceph_decode_32(p);
301 }
302 }
303
304 /* ignore trailing name maps. */
305
306 dout("crush_decode success\n");
307 return c;
308
309badmem:
310 err = -ENOMEM;
311bad:
312 dout("crush_decode fail %d\n", err);
313 crush_destroy(c);
314 return ERR_PTR(err);
315}
316
317/*
318 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
319 * to a set of osds)
320 */
321static int pgid_cmp(struct ceph_pg l, struct ceph_pg r)
322{
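	/*
	 * Compare the raw bits: this relies on struct ceph_pg packing
	 * into exactly 64 bits, so a pgid orders as a single u64.
	 */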
323 u64 a = *(u64 *)&l;
324 u64 b = *(u64 *)&r;
325
326 if (a < b)
327 return -1;
328 if (a > b)
329 return 1;
330 return 0;
331}
332
333static int __insert_pg_mapping(struct ceph_pg_mapping *new,
334 struct rb_root *root)
335{
336 struct rb_node **p = &root->rb_node;
337 struct rb_node *parent = NULL;
338 struct ceph_pg_mapping *pg = NULL;
339 int c;
340
341 while (*p) {
342 parent = *p;
343 pg = rb_entry(parent, struct ceph_pg_mapping, node);
344 c = pgid_cmp(new->pgid, pg->pgid);
345 if (c < 0)
346 p = &(*p)->rb_left;
347 else if (c > 0)
348 p = &(*p)->rb_right;
349 else
350 return -EEXIST;
351 }
352
353 rb_link_node(&new->node, parent, p);
354 rb_insert_color(&new->node, root);
355 return 0;
356}
357
358static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
359 struct ceph_pg pgid)
360{
361 struct rb_node *n = root->rb_node;
362 struct ceph_pg_mapping *pg;
363 int c;
364
365 while (n) {
366 pg = rb_entry(n, struct ceph_pg_mapping, node);
367 c = pgid_cmp(pgid, pg->pgid);
368 if (c < 0)
369 n = n->rb_left;
370 else if (c > 0)
371 n = n->rb_right;
372 else
373 return pg;
374 }
375 return NULL;
376}
377
378/*
379 * rbtree of pg pool info
380 */
381static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
382{
383 struct rb_node **p = &root->rb_node;
384 struct rb_node *parent = NULL;
385 struct ceph_pg_pool_info *pi = NULL;
386
387 while (*p) {
388 parent = *p;
389 pi = rb_entry(parent, struct ceph_pg_pool_info, node);
390 if (new->id < pi->id)
391 p = &(*p)->rb_left;
392 else if (new->id > pi->id)
393 p = &(*p)->rb_right;
394 else
395 return -EEXIST;
396 }
397
398 rb_link_node(&new->node, parent, p);
399 rb_insert_color(&new->node, root);
400 return 0;
401}
402
403static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
404{
405 struct ceph_pg_pool_info *pi;
406 struct rb_node *n = root->rb_node;
407
408 while (n) {
409 pi = rb_entry(n, struct ceph_pg_pool_info, node);
410 if (id < pi->id)
411 n = n->rb_left;
412 else if (id > pi->id)
413 n = n->rb_right;
414 else
415 return pi;
416 }
417 return NULL;
418}
419
420int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
421{
422 struct rb_node *rbp;
423
424 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
425 struct ceph_pg_pool_info *pi =
426 rb_entry(rbp, struct ceph_pg_pool_info, node);
427 if (pi->name && strcmp(pi->name, name) == 0)
428 return pi->id;
429 }
430 return -ENOENT;
431}
432
433static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
434{
435 rb_erase(&pi->node, root);
436 kfree(pi->name);
437 kfree(pi);
438}
439
440static int __decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
441{
442 unsigned n, m;
443
444 ceph_decode_copy(p, &pi->v, sizeof(pi->v));
445 calc_pg_masks(pi);
446
447 /* num_snaps * snap_info_t */
448 n = le32_to_cpu(pi->v.num_snaps);
449 while (n--) {
450 ceph_decode_need(p, end, sizeof(u64) + 1 + sizeof(u64) +
451 sizeof(struct ceph_timespec), bad);
452 *p += sizeof(u64) + /* key */
453 1 + sizeof(u64) + /* u8, snapid */
454 sizeof(struct ceph_timespec);
455 m = ceph_decode_32(p); /* snap name */
456 *p += m;
457 }
458
459 *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
460 return 0;
461
462bad:
463 return -EINVAL;
464}
465
466static int __decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
467{
468 struct ceph_pg_pool_info *pi;
469 u32 num, len, pool;
470
471 ceph_decode_32_safe(p, end, num, bad);
472 dout(" %d pool names\n", num);
473 while (num--) {
474 ceph_decode_32_safe(p, end, pool, bad);
475 ceph_decode_32_safe(p, end, len, bad);
476 dout(" pool %d len %d\n", pool, len);
477 pi = __lookup_pg_pool(&map->pg_pools, pool);
478 if (pi) {
479 kfree(pi->name);
480 pi->name = kmalloc(len + 1, GFP_NOFS);
481 if (pi->name) {
482 memcpy(pi->name, *p, len);
483 pi->name[len] = '\0';
484 dout(" name is %s\n", pi->name);
485 }
486 }
487 *p += len;
488 }
489 return 0;
490
491bad:
492 return -EINVAL;
493}
494
495/*
496 * osd map
497 */
498void ceph_osdmap_destroy(struct ceph_osdmap *map)
499{
500 dout("osdmap_destroy %p\n", map);
501 if (map->crush)
502 crush_destroy(map->crush);
503 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
504 struct ceph_pg_mapping *pg =
505 rb_entry(rb_first(&map->pg_temp),
506 struct ceph_pg_mapping, node);
507 rb_erase(&pg->node, &map->pg_temp);
508 kfree(pg);
509 }
510 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
511 struct ceph_pg_pool_info *pi =
512 rb_entry(rb_first(&map->pg_pools),
513 struct ceph_pg_pool_info, node);
514 __remove_pg_pool(&map->pg_pools, pi);
515 }
516 kfree(map->osd_state);
517 kfree(map->osd_weight);
518 kfree(map->osd_addr);
519 kfree(map);
520}
521
522/*
523 * adjust max osd value. reallocate arrays.
524 */
525static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
526{
527 u8 *state;
528 struct ceph_entity_addr *addr;
529 u32 *weight;
530
531 state = kcalloc(max, sizeof(*state), GFP_NOFS);
532 addr = kcalloc(max, sizeof(*addr), GFP_NOFS);
533 weight = kcalloc(max, sizeof(*weight), GFP_NOFS);
534 if (state == NULL || addr == NULL || weight == NULL) {
535 kfree(state);
536 kfree(addr);
537 kfree(weight);
538 return -ENOMEM;
539 }
540
541 /* copy old? */
542 if (map->osd_state) {
543 memcpy(state, map->osd_state, map->max_osd*sizeof(*state));
544 memcpy(addr, map->osd_addr, map->max_osd*sizeof(*addr));
545 memcpy(weight, map->osd_weight, map->max_osd*sizeof(*weight));
546 kfree(map->osd_state);
547 kfree(map->osd_addr);
548 kfree(map->osd_weight);
549 }
550
551 map->osd_state = state;
552 map->osd_weight = weight;
553 map->osd_addr = addr;
554 map->max_osd = max;
555 return 0;
556}
557
558/*
559 * decode a full map.
560 */
561struct ceph_osdmap *osdmap_decode(void **p, void *end)
562{
563 struct ceph_osdmap *map;
564 u16 version;
565 u32 len, max, i;
566 u8 ev;
567 int err = -EINVAL;
568 void *start = *p;
569 struct ceph_pg_pool_info *pi;
570
571 dout("osdmap_decode %p to %p len %d\n", *p, end, (int)(end - *p));
572
573 map = kzalloc(sizeof(*map), GFP_NOFS);
574 if (map == NULL)
575 return ERR_PTR(-ENOMEM);
576 map->pg_temp = RB_ROOT;
577
578 ceph_decode_16_safe(p, end, version, bad);
579 if (version > CEPH_OSDMAP_VERSION) {
580 pr_warning("got unknown v %d > %d of osdmap\n", version,
581 CEPH_OSDMAP_VERSION);
582 goto bad;
583 }
584
585 ceph_decode_need(p, end, 2*sizeof(u64)+6*sizeof(u32), bad);
586 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
587 map->epoch = ceph_decode_32(p);
588 ceph_decode_copy(p, &map->created, sizeof(map->created));
589 ceph_decode_copy(p, &map->modified, sizeof(map->modified));
590
591 ceph_decode_32_safe(p, end, max, bad);
592 while (max--) {
593 ceph_decode_need(p, end, 4 + 1 + sizeof(pi->v), bad);
594 pi = kzalloc(sizeof(*pi), GFP_NOFS);
595 if (!pi)
596 goto bad;
597 pi->id = ceph_decode_32(p);
598 ev = ceph_decode_8(p); /* encoding version */
599 if (ev > CEPH_PG_POOL_VERSION) {
600 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
601 ev, CEPH_PG_POOL_VERSION);
602 kfree(pi);
603 goto bad;
604 }
605 err = __decode_pool(p, end, pi);
606 if (err < 0)
607 goto bad;
608 __insert_pg_pool(&map->pg_pools, pi);
609 }
610
611 if (version >= 5 && __decode_pool_names(p, end, map) < 0)
612 goto bad;
613
614 ceph_decode_32_safe(p, end, map->pool_max, bad);
615
616 ceph_decode_32_safe(p, end, map->flags, bad);
617
618 max = ceph_decode_32(p);
619
620 /* (re)alloc osd arrays */
621 err = osdmap_set_max_osd(map, max);
622 if (err < 0)
623 goto bad;
624 dout("osdmap_decode max_osd = %d\n", map->max_osd);
625
626 /* osds */
627 err = -EINVAL;
628 ceph_decode_need(p, end, 3*sizeof(u32) +
629 map->max_osd*(1 + sizeof(*map->osd_weight) +
630 sizeof(*map->osd_addr)), bad);
631 *p += 4; /* skip length field (should match max) */
632 ceph_decode_copy(p, map->osd_state, map->max_osd);
633
634 *p += 4; /* skip length field (should match max) */
635 for (i = 0; i < map->max_osd; i++)
636 map->osd_weight[i] = ceph_decode_32(p);
637
638 *p += 4; /* skip length field (should match max) */
639 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
640 for (i = 0; i < map->max_osd; i++)
641 ceph_decode_addr(&map->osd_addr[i]);
642
643 /* pg_temp */
644 ceph_decode_32_safe(p, end, len, bad);
645 for (i = 0; i < len; i++) {
646 int n, j;
647 struct ceph_pg pgid;
648 struct ceph_pg_mapping *pg;
649
650 ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad);
651 ceph_decode_copy(p, &pgid, sizeof(pgid));
652 n = ceph_decode_32(p);
653 ceph_decode_need(p, end, n * sizeof(u32), bad);
654 err = -ENOMEM;
655 pg = kmalloc(sizeof(*pg) + n*sizeof(u32), GFP_NOFS);
656 if (!pg)
657 goto bad;
658 pg->pgid = pgid;
659 pg->len = n;
660 for (j = 0; j < n; j++)
661 pg->osds[j] = ceph_decode_32(p);
662
663 err = __insert_pg_mapping(pg, &map->pg_temp);
664 if (err)
665 goto bad;
666 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, len);
667 }
668
669 /* crush */
670 ceph_decode_32_safe(p, end, len, bad);
671 dout("osdmap_decode crush len %d from off 0x%x\n", len,
672 (int)(*p - start));
673 ceph_decode_need(p, end, len, bad);
674 map->crush = crush_decode(*p, end);
675 *p += len;
676 if (IS_ERR(map->crush)) {
677 err = PTR_ERR(map->crush);
678 map->crush = NULL;
679 goto bad;
680 }
681
682 /* ignore the rest of the map */
683 *p = end;
684
685 dout("osdmap_decode done %p %p\n", *p, end);
686 return map;
687
688bad:
689 dout("osdmap_decode fail\n");
690 ceph_osdmap_destroy(map);
691 return ERR_PTR(err);
692}
693
694/*
695 * decode and apply an incremental map update.
696 */
697struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
698 struct ceph_osdmap *map,
699 struct ceph_messenger *msgr)
700{
701 struct crush_map *newcrush = NULL;
702 struct ceph_fsid fsid;
703 u32 epoch = 0;
704 struct ceph_timespec modified;
705 u32 len, pool;
706 __s32 new_pool_max, new_flags, max;
707 void *start = *p;
708 int err = -EINVAL;
709 u16 version;
710 struct rb_node *rbp;
711
712 ceph_decode_16_safe(p, end, version, bad);
713 if (version > CEPH_OSDMAP_INC_VERSION) {
714 pr_warning("got unknown v %d > %d of inc osdmap\n", version,
715 CEPH_OSDMAP_INC_VERSION);
716 goto bad;
717 }
718
719 ceph_decode_need(p, end, sizeof(fsid)+sizeof(modified)+2*sizeof(u32),
720 bad);
721 ceph_decode_copy(p, &fsid, sizeof(fsid));
722 epoch = ceph_decode_32(p);
723 BUG_ON(epoch != map->epoch+1);
724 ceph_decode_copy(p, &modified, sizeof(modified));
725 new_pool_max = ceph_decode_32(p);
726 new_flags = ceph_decode_32(p);
727
728 /* full map? */
729 ceph_decode_32_safe(p, end, len, bad);
730 if (len > 0) {
731 dout("apply_incremental full map len %d, %p to %p\n",
732 len, *p, end);
733 return osdmap_decode(p, min(*p+len, end));
734 }
735
736 /* new crush? */
737 ceph_decode_32_safe(p, end, len, bad);
738 if (len > 0) {
739 dout("apply_incremental new crush map len %d, %p to %p\n",
740 len, *p, end);
741 newcrush = crush_decode(*p, min(*p+len, end));
742 if (IS_ERR(newcrush))
743 return ERR_CAST(newcrush);
744 *p += len;
745 }
746
747 /* new flags? */
748 if (new_flags >= 0)
749 map->flags = new_flags;
750 if (new_pool_max >= 0)
751 map->pool_max = new_pool_max;
752
753 ceph_decode_need(p, end, 5*sizeof(u32), bad);
754
755 /* new max? */
756 max = ceph_decode_32(p);
757 if (max >= 0) {
758 err = osdmap_set_max_osd(map, max);
759 if (err < 0)
760 goto bad;
761 }
762
763 map->epoch++;
764 map->modified = modified;
765 if (newcrush) {
766 if (map->crush)
767 crush_destroy(map->crush);
768 map->crush = newcrush;
769 newcrush = NULL;
770 }
771
772 /* new_pool */
773 ceph_decode_32_safe(p, end, len, bad);
774 while (len--) {
775 __u8 ev;
776 struct ceph_pg_pool_info *pi;
777
778 ceph_decode_32_safe(p, end, pool, bad);
779 ceph_decode_need(p, end, 1 + sizeof(pi->v), bad);
780 ev = ceph_decode_8(p); /* encoding version */
781 if (ev > CEPH_PG_POOL_VERSION) {
782 pr_warning("got unknown v %d > %d of ceph_pg_pool\n",
783 ev, CEPH_PG_POOL_VERSION);
784 goto bad;
785 }
786 pi = __lookup_pg_pool(&map->pg_pools, pool);
787 if (!pi) {
788 pi = kzalloc(sizeof(*pi), GFP_NOFS);
789 if (!pi) {
790 err = -ENOMEM;
791 goto bad;
792 }
793 pi->id = pool;
794 __insert_pg_pool(&map->pg_pools, pi);
795 }
796 err = __decode_pool(p, end, pi);
797 if (err < 0)
798 goto bad;
799 }
800 if (version >= 5 && __decode_pool_names(p, end, map) < 0)
801 goto bad;
802
803 /* old_pool */
804 ceph_decode_32_safe(p, end, len, bad);
805 while (len--) {
806 struct ceph_pg_pool_info *pi;
807
808 ceph_decode_32_safe(p, end, pool, bad);
809 pi = __lookup_pg_pool(&map->pg_pools, pool);
810 if (pi)
811 __remove_pg_pool(&map->pg_pools, pi);
812 }
813
814 /* new_up */
815 err = -EINVAL;
816 ceph_decode_32_safe(p, end, len, bad);
817 while (len--) {
818 u32 osd;
819 struct ceph_entity_addr addr;
820 ceph_decode_32_safe(p, end, osd, bad);
821 ceph_decode_copy_safe(p, end, &addr, sizeof(addr), bad);
822 ceph_decode_addr(&addr);
823 pr_info("osd%d up\n", osd);
824 BUG_ON(osd >= map->max_osd);
825 map->osd_state[osd] |= CEPH_OSD_UP;
826 map->osd_addr[osd] = addr;
827 }
828
829 /* new_down */
830 ceph_decode_32_safe(p, end, len, bad);
831 while (len--) {
832 u32 osd;
833 ceph_decode_32_safe(p, end, osd, bad);
834 (*p)++; /* clean flag */
835 pr_info("osd%d down\n", osd);
836 if (osd < map->max_osd)
837 map->osd_state[osd] &= ~CEPH_OSD_UP;
838 }
839
840 /* new_weight */
841 ceph_decode_32_safe(p, end, len, bad);
842 while (len--) {
843 u32 osd, off;
844 ceph_decode_need(p, end, sizeof(u32)*2, bad);
845 osd = ceph_decode_32(p);
846 off = ceph_decode_32(p);
847 pr_info("osd%d weight 0x%x %s\n", osd, off,
848 off == CEPH_OSD_IN ? "(in)" :
849 (off == CEPH_OSD_OUT ? "(out)" : ""));
850 if (osd < map->max_osd)
851 map->osd_weight[osd] = off;
852 }
853
854 /* new_pg_temp */
855 rbp = rb_first(&map->pg_temp);
856 ceph_decode_32_safe(p, end, len, bad);
857 while (len--) {
858 struct ceph_pg_mapping *pg;
859 int j;
860 struct ceph_pg pgid;
861 u32 pglen;
862 ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad);
863 ceph_decode_copy(p, &pgid, sizeof(pgid));
864 pglen = ceph_decode_32(p);
865
866 /* remove any? */
867 while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping,
868 node)->pgid, pgid) <= 0) {
869 struct ceph_pg_mapping *cur =
870 rb_entry(rbp, struct ceph_pg_mapping, node);
871
872 rbp = rb_next(rbp);
873 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
874 rb_erase(&cur->node, &map->pg_temp);
875 kfree(cur);
876 }
877
878 if (pglen) {
879 /* insert */
880 ceph_decode_need(p, end, pglen*sizeof(u32), bad);
881 pg = kmalloc(sizeof(*pg) + sizeof(u32)*pglen, GFP_NOFS);
882 if (!pg) {
883 err = -ENOMEM;
884 goto bad;
885 }
886 pg->pgid = pgid;
887 pg->len = pglen;
888 for (j = 0; j < pglen; j++)
889 pg->osds[j] = ceph_decode_32(p);
890 err = __insert_pg_mapping(pg, &map->pg_temp);
891 if (err) {
892 kfree(pg);
893 goto bad;
894 }
895 dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid,
896 pglen);
897 }
898 }
899 while (rbp) {
900 struct ceph_pg_mapping *cur =
901 rb_entry(rbp, struct ceph_pg_mapping, node);
902
903 rbp = rb_next(rbp);
904 dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid);
905 rb_erase(&cur->node, &map->pg_temp);
906 kfree(cur);
907 }
908
909 /* ignore the rest */
910 *p = end;
911 return map;
912
913bad:
914 pr_err("corrupt inc osdmap epoch %d off %d (%p of %p-%p)\n",
915 epoch, (int)(*p - start), *p, start, end);
916 print_hex_dump(KERN_DEBUG, "osdmap: ",
917 DUMP_PREFIX_OFFSET, 16, 1,
918 start, end - start, true);
919 if (newcrush)
920 crush_destroy(newcrush);
921 return ERR_PTR(err);
922}
923
924
925
926
927/*
928 * calculate file layout from given offset, length.
929 * fill in the correct object number, logical length, and object extent
930 * offset, length.
931 *
932 * for now, we write only a single su, until we can
933 * pass a stride back to the caller.
934 */
935void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
936 u64 off, u64 *plen,
937 u64 *ono,
938 u64 *oxoff, u64 *oxlen)
939{
940 u32 osize = le32_to_cpu(layout->fl_object_size);
941 u32 su = le32_to_cpu(layout->fl_stripe_unit);
942 u32 sc = le32_to_cpu(layout->fl_stripe_count);
943 u32 bl, stripeno, stripepos, objsetno;
944 u32 su_per_object;
945 u64 t, su_offset;
946
947 dout("mapping %llu~%llu osize %u fl_su %u\n", off, *plen,
948 osize, su);
949 su_per_object = osize / su;
950 dout("osize %u / su %u = su_per_object %u\n", osize, su,
951 su_per_object);
952
953 BUG_ON((su & ~PAGE_MASK) != 0);
954 /* bl = off / su; */
955 t = off;
956 do_div(t, su);
957 bl = t;
958 dout("off %llu / su %u = bl %u\n", off, su, bl);
959
960 stripeno = bl / sc;
961 stripepos = bl % sc;
962 objsetno = stripeno / su_per_object;
963
964 *ono = objsetno * sc + stripepos;
965 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned)*ono);
966
967 /* *oxoff = off % su; offset within the stripe unit */
968 t = off;
969 su_offset = do_div(t, su);
970 *oxoff = su_offset + (stripeno % su_per_object) * su;
971
972 /*
973 * Calculate the length of the extent being written to the selected
974 * object. This is the minimum of the full length requested (plen) or
975 * the remainder of the current stripe being written to.
976 */
977 *oxlen = min_t(u64, *plen, su - su_offset);
978 *plen = *oxlen;
979
980 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
981}
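
To make the arithmetic above concrete: assuming (hypothetically) a 1 MB stripe unit, a stripe count of 2, and 4 MB objects, a 3 MB write at file offset 5 MB maps its first 1 MB to object 1 at offset 2 MB. A minimal user-space sketch replaying the same math:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t su = 1 << 20;          /* stripe unit: 1 MB (assumed) */
		uint32_t sc = 2;                /* stripe count (assumed) */
		uint32_t osize = 4 << 20;       /* object size: 4 MB (assumed) */
		uint32_t su_per_object = osize / su;            /* = 4 */

		uint64_t off = 5ULL << 20, plen = 3ULL << 20;

		uint32_t bl = off / su;                         /* su index: 5 */
		uint32_t stripeno = bl / sc;                    /* 2 */
		uint32_t stripepos = bl % sc;                   /* 1 */
		uint32_t objsetno = stripeno / su_per_object;   /* 0 */

		uint64_t ono = (uint64_t)objsetno * sc + stripepos;  /* object 1 */
		uint64_t su_offset = off % su;                       /* 0 */
		uint64_t oxoff = su_offset +
				 (uint64_t)(stripeno % su_per_object) * su;
		uint64_t oxlen = plen < su - su_offset ? plen : su - su_offset;

		/* expect: ono=1 oxoff=0x200000 oxlen=0x100000 */
		printf("ono=%llu oxoff=0x%llx oxlen=0x%llx\n",
		       (unsigned long long)ono, (unsigned long long)oxoff,
		       (unsigned long long)oxlen);
		return 0;
	}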
982
983/*
984 * calculate an object layout (i.e. pgid) from an oid,
985 * file_layout, and osdmap
986 */
987int ceph_calc_object_layout(struct ceph_object_layout *ol,
988 const char *oid,
989 struct ceph_file_layout *fl,
990 struct ceph_osdmap *osdmap)
991{
992 unsigned num, num_mask;
993 struct ceph_pg pgid;
994 s32 preferred = (s32)le32_to_cpu(fl->fl_pg_preferred);
995 int poolid = le32_to_cpu(fl->fl_pg_pool);
996 struct ceph_pg_pool_info *pool;
997 unsigned ps;
998
999 BUG_ON(!osdmap);
1000
1001 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
1002 if (!pool)
1003 return -EIO;
1004 ps = ceph_str_hash(pool->v.object_hash, oid, strlen(oid));
1005 if (preferred >= 0) {
1006 ps += preferred;
1007 num = le32_to_cpu(pool->v.lpg_num);
1008 num_mask = pool->lpg_num_mask;
1009 } else {
1010 num = le32_to_cpu(pool->v.pg_num);
1011 num_mask = pool->pg_num_mask;
1012 }
1013
1014 pgid.ps = cpu_to_le16(ps);
1015 pgid.preferred = cpu_to_le16(preferred);
1016 pgid.pool = fl->fl_pg_pool;
1017 if (preferred >= 0)
1018 dout("calc_object_layout '%s' pgid %d.%xp%d\n", oid, poolid, ps,
1019 (int)preferred);
1020 else
1021 dout("calc_object_layout '%s' pgid %d.%x\n", oid, poolid, ps);
1022
1023 ol->ol_pgid = pgid;
1024 ol->ol_stripe_unit = fl->fl_object_stripe_unit;
1025 return 0;
1026}
1027
1028/*
1029 * Calculate raw osd vector for the given pgid. Return pointer to osd
1030 * array, or NULL on failure.
1031 */
1032static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1033 int *osds, int *num)
1034{
1035 struct ceph_pg_mapping *pg;
1036 struct ceph_pg_pool_info *pool;
1037 int ruleno;
1038 unsigned poolid, ps, pps;
1039 int preferred;
1040
1041 /* pg_temp? */
1042 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
1043 if (pg) {
1044 *num = pg->len;
1045 return pg->osds;
1046 }
1047
1048 /* crush */
1049 poolid = le32_to_cpu(pgid.pool);
1050 ps = le16_to_cpu(pgid.ps);
1051 preferred = (s16)le16_to_cpu(pgid.preferred);
1052
1053 /* don't forcefeed bad device ids to crush */
1054 if (preferred >= osdmap->max_osd ||
1055 preferred >= osdmap->crush->max_devices)
1056 preferred = -1;
1057
1058 pool = __lookup_pg_pool(&osdmap->pg_pools, poolid);
1059 if (!pool)
1060 return NULL;
1061 ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset,
1062 pool->v.type, pool->v.size);
1063 if (ruleno < 0) {
1064 pr_err("no crush rule pool %d ruleset %d type %d size %d\n",
1065 poolid, pool->v.crush_ruleset, pool->v.type,
1066 pool->v.size);
1067 return NULL;
1068 }
1069
1070 if (preferred >= 0)
1071 pps = ceph_stable_mod(ps,
1072 le32_to_cpu(pool->v.lpgp_num),
1073 pool->lpgp_num_mask);
1074 else
1075 pps = ceph_stable_mod(ps,
1076 le32_to_cpu(pool->v.pgp_num),
1077 pool->pgp_num_mask);
1078 pps += poolid;
1079 *num = crush_do_rule(osdmap->crush, ruleno, pps, osds,
1080 min_t(int, pool->v.size, *num),
1081 preferred, osdmap->osd_weight);
1082 return osds;
1083}
1084
1085/*
1086 * Return acting set for given pgid.
1087 */
1088int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
1089 int *acting)
1090{
1091 int rawosds[CEPH_PG_MAX_SIZE], *osds;
1092 int i, o, num = CEPH_PG_MAX_SIZE;
1093
1094 osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
1095 if (!osds)
1096 return -1;
1097
1098 /* primary is first up osd */
1099 o = 0;
1100 for (i = 0; i < num; i++)
1101 if (ceph_osd_is_up(osdmap, osds[i]))
1102 acting[o++] = osds[i];
1103 return o;
1104}
1105
1106/*
1107 * Return primary osd for given pgid, or -1 if none.
1108 */
1109int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, struct ceph_pg pgid)
1110{
1111 int rawosds[CEPH_PG_MAX_SIZE], *osds;
1112 int i, num = CEPH_PG_MAX_SIZE;
1113
1114 osds = calc_pg_raw(osdmap, pgid, rawosds, &num);
1115 if (!osds)
1116 return -1;
1117
1118 /* primary is first up osd */
1119 for (i = 0; i < num; i++)
1120 if (ceph_osd_is_up(osdmap, osds[i]))
1121 return osds[i];
1122 return -1;
1123}
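
Taken together, the helpers above carry a file extent all the way to a set of OSDs. A condensed sketch of the flow, assuming layout, osdmap, ino, off, and len are in scope (the "<ino>.<objnum>" oid format is an assumption borrowed from the osd client, not something this file defines):

	u64 ono, oxoff, oxlen;
	struct ceph_object_layout ol;
	int osds[CEPH_PG_MAX_SIZE], n;
	char oid[40];

	/* 1. file offset/length -> object number and extent within it */
	ceph_calc_file_object_mapping(layout, off, &len, &ono, &oxoff, &oxlen);
	/* 2. object name -> placement group via the pool's hash */
	snprintf(oid, sizeof(oid), "%llx.%08llx",
		 (unsigned long long)ino, (unsigned long long)ono);
	ceph_calc_object_layout(&ol, oid, layout, osdmap);
	/* 3. placement group -> currently up ("acting") OSDs via CRUSH */
	n = ceph_calc_pg_acting(osdmap, ol.ol_pgid, osds);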
diff --git a/fs/ceph/osdmap.h b/fs/ceph/osdmap.h
deleted file mode 100644
index a592b211be39..000000000000
--- a/fs/ceph/osdmap.h
+++ /dev/null
@@ -1,130 +0,0 @@
1#ifndef _FS_CEPH_OSDMAP_H
2#define _FS_CEPH_OSDMAP_H
3
4#include <linux/rbtree.h>
5#include "types.h"
6#include "ceph_fs.h"
7#include "crush/crush.h"
8
9/*
10 * The osd map describes the current membership of the osd cluster and
11 * specifies the mapping of objects to placement groups and placement
12 * groups to (sets of) osds. That is, it completely specifies the
13 * (desired) distribution of all data objects in the system at some
14 * point in time.
15 *
16 * Each map version is identified by an epoch, which increases monotonically.
17 *
18 * The map can be updated either via an incremental map (diff) describing
19 * the change between two successive epochs, or as a fully encoded map.
20 */
21struct ceph_pg_pool_info {
22 struct rb_node node;
23 int id;
24 struct ceph_pg_pool v;
25 int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask;
26 char *name;
27};
28
29struct ceph_pg_mapping {
30 struct rb_node node;
31 struct ceph_pg pgid;
32 int len;
33 int osds[];
34};
35
36struct ceph_osdmap {
37 struct ceph_fsid fsid;
38 u32 epoch;
39 u32 mkfs_epoch;
40 struct ceph_timespec created, modified;
41
42 u32 flags; /* CEPH_OSDMAP_* */
43
44 u32 max_osd; /* size of osd_state, _offload, _addr arrays */
45 u8 *osd_state; /* CEPH_OSD_* */
46 u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */
47 struct ceph_entity_addr *osd_addr;
48
49 struct rb_root pg_temp;
50 struct rb_root pg_pools;
51 u32 pool_max;
52
53 /* the CRUSH map specifies the mapping of placement groups to
54 * the list of osds that store+replicate them. */
55 struct crush_map *crush;
56};
57
58/*
59 * file layout helpers
60 */
61#define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit))
62#define ceph_file_layout_stripe_count(l) \
63 ((__s32)le32_to_cpu((l).fl_stripe_count))
64#define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size))
65#define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash))
66#define ceph_file_layout_object_su(l) \
67 ((__s32)le32_to_cpu((l).fl_object_stripe_unit))
68#define ceph_file_layout_pg_preferred(l) \
69 ((__s32)le32_to_cpu((l).fl_pg_preferred))
70#define ceph_file_layout_pg_pool(l) \
71 ((__s32)le32_to_cpu((l).fl_pg_pool))
72
73static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l)
74{
75 return le32_to_cpu(l->fl_stripe_unit) *
76 le32_to_cpu(l->fl_stripe_count);
77}
78
79/* "period" == bytes before i start on a new set of objects */
80static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l)
81{
82 return le32_to_cpu(l->fl_object_size) *
83 le32_to_cpu(l->fl_stripe_count);
84}
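
With the hypothetical layout from the striping example earlier (1 MB stripe unit, stripe count 2, 4 MB objects), the stripe width is 1 MB * 2 = 2 MB and the period is 4 MB * 2 = 8 MB: after every 8 MB of file data, striping starts over on a fresh set of two objects.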
85
86
87static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd)
88{
89 return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP);
90}
91
92static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag)
93{
94 return map && (map->flags & flag);
95}
96
97extern char *ceph_osdmap_state_str(char *str, int len, int state);
98
99static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map,
100 int osd)
101{
102 if (osd >= map->max_osd)
103 return NULL;
104 return &map->osd_addr[osd];
105}
106
107extern struct ceph_osdmap *osdmap_decode(void **p, void *end);
108extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
109 struct ceph_osdmap *map,
110 struct ceph_messenger *msgr);
111extern void ceph_osdmap_destroy(struct ceph_osdmap *map);
112
113/* calculate mapping of a file extent to an object */
114extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
115 u64 off, u64 *plen,
116 u64 *bno, u64 *oxoff, u64 *oxlen);
117
118/* calculate mapping of object to a placement group */
119extern int ceph_calc_object_layout(struct ceph_object_layout *ol,
120 const char *oid,
121 struct ceph_file_layout *fl,
122 struct ceph_osdmap *osdmap);
123extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
124 int *acting);
125extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap,
126 struct ceph_pg pgid);
127
128extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
129
130#endif
diff --git a/fs/ceph/pagelist.c b/fs/ceph/pagelist.c
deleted file mode 100644
index 326e1c04176f..000000000000
--- a/fs/ceph/pagelist.c
+++ /dev/null
@@ -1,63 +0,0 @@
1
2#include <linux/gfp.h>
3#include <linux/pagemap.h>
4#include <linux/highmem.h>
5
6#include "pagelist.h"
7
8static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
9{
10 struct page *page = list_entry(pl->head.prev, struct page,
11 lru);
12 kunmap(page);
13}
14
15int ceph_pagelist_release(struct ceph_pagelist *pl)
16{
17 if (pl->mapped_tail)
18 ceph_pagelist_unmap_tail(pl);
19
20 while (!list_empty(&pl->head)) {
21 struct page *page = list_first_entry(&pl->head, struct page,
22 lru);
23 list_del(&page->lru);
24 __free_page(page);
25 }
26 return 0;
27}
28
29static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
30{
31 struct page *page = __page_cache_alloc(GFP_NOFS);
32 if (!page)
33 return -ENOMEM;
34 pl->room += PAGE_SIZE;
35 list_add_tail(&page->lru, &pl->head);
36 if (pl->mapped_tail)
37 ceph_pagelist_unmap_tail(pl);
38 pl->mapped_tail = kmap(page);
39 return 0;
40}
41
42int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
43{
44 while (pl->room < len) {
45 size_t bit = pl->room;
46 int ret;
47
48 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
49 buf, bit);
50 pl->length += bit;
51 pl->room -= bit;
52 buf += bit;
53 len -= bit;
54 ret = ceph_pagelist_addpage(pl);
55 if (ret)
56 return ret;
57 }
58
59 memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
60 pl->length += len;
61 pl->room -= len;
62 return 0;
63}
diff --git a/fs/ceph/pagelist.h b/fs/ceph/pagelist.h
deleted file mode 100644
index cc9327aa1c98..000000000000
--- a/fs/ceph/pagelist.h
+++ /dev/null
@@ -1,54 +0,0 @@
1#ifndef __FS_CEPH_PAGELIST_H
2#define __FS_CEPH_PAGELIST_H
3
4#include <linux/list.h>
5
6struct ceph_pagelist {
7 struct list_head head;
8 void *mapped_tail;
9 size_t length;
10 size_t room;
11};
12
13static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
14{
15 INIT_LIST_HEAD(&pl->head);
16 pl->mapped_tail = NULL;
17 pl->length = 0;
18 pl->room = 0;
19}
20extern int ceph_pagelist_release(struct ceph_pagelist *pl);
21
22extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l);
23
24static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
25{
26 __le64 ev = cpu_to_le64(v);
27 return ceph_pagelist_append(pl, &ev, sizeof(ev));
28}
29static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v)
30{
31 __le32 ev = cpu_to_le32(v);
32 return ceph_pagelist_append(pl, &ev, sizeof(ev));
33}
34static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v)
35{
36 __le16 ev = cpu_to_le16(v);
37 return ceph_pagelist_append(pl, &ev, sizeof(ev));
38}
39static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v)
40{
41 return ceph_pagelist_append(pl, &v, 1);
42}
43static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl,
44 char *s, size_t len)
45{
46 int ret = ceph_pagelist_encode_32(pl, len);
47 if (ret)
48 return ret;
49 if (len)
50 return ceph_pagelist_append(pl, s, len);
51 return 0;
52}
53
54#endif
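
A minimal usage sketch of this API (illustrative; a real caller would propagate the -ENOMEM from a failed append):

	struct ceph_pagelist pl;
	int err;

	ceph_pagelist_init(&pl);
	err = ceph_pagelist_encode_32(&pl, 42);
	if (!err)
		err = ceph_pagelist_encode_string(&pl, (char *)"foo", 3);
	/* on success, the pages chained on pl.head hold the encoded
	 * bytes and can be handed off as message data */
	ceph_pagelist_release(&pl);   /* unmaps the tail, frees every page */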
diff --git a/fs/ceph/rados.h b/fs/ceph/rados.h
deleted file mode 100644
index 6d5247f2e81b..000000000000
--- a/fs/ceph/rados.h
+++ /dev/null
@@ -1,405 +0,0 @@
1#ifndef CEPH_RADOS_H
2#define CEPH_RADOS_H
3
4/*
5 * Data types for the Ceph distributed object storage layer RADOS
6 * (Reliable Autonomic Distributed Object Store).
7 */
8
9#include "msgr.h"
10
11/*
12 * osdmap encoding versions
13 */
14#define CEPH_OSDMAP_INC_VERSION 5
15#define CEPH_OSDMAP_INC_VERSION_EXT 5
16#define CEPH_OSDMAP_VERSION 5
17#define CEPH_OSDMAP_VERSION_EXT 5
18
19/*
20 * fs id
21 */
22struct ceph_fsid {
23 unsigned char fsid[16];
24};
25
26static inline int ceph_fsid_compare(const struct ceph_fsid *a,
27 const struct ceph_fsid *b)
28{
29 return memcmp(a, b, sizeof(*a));
30}
31
32/*
33 * ino, object, etc.
34 */
35typedef __le64 ceph_snapid_t;
36#define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */
37#define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */
38#define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */
39
40struct ceph_timespec {
41 __le32 tv_sec;
42 __le32 tv_nsec;
43} __attribute__ ((packed));
44
45
46/*
47 * object layout - how objects are mapped into PGs
48 */
49#define CEPH_OBJECT_LAYOUT_HASH 1
50#define CEPH_OBJECT_LAYOUT_LINEAR 2
51#define CEPH_OBJECT_LAYOUT_HASHINO 3
52
53/*
54 * pg layout -- how PGs are mapped onto (sets of) OSDs
55 */
56#define CEPH_PG_LAYOUT_CRUSH 0
57#define CEPH_PG_LAYOUT_HASH 1
58#define CEPH_PG_LAYOUT_LINEAR 2
59#define CEPH_PG_LAYOUT_HYBRID 3
60
61#define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */
62
63/*
64 * placement group.
65 * we encode this into one __le64.
66 */
67struct ceph_pg {
68 __le16 preferred; /* preferred primary osd */
69 __le16 ps; /* placement seed */
70 __le32 pool; /* object pool */
71} __attribute__ ((packed));
72
73/*
74 * pg_pool is a set of pgs storing a pool of objects
75 *
76 * pg_num -- base number of pseudorandomly placed pgs
77 *
78 * pgp_num -- effective number when calculating pg placement. this
79 * is used for pg_num increases. new pgs result in data being "split"
80 * into new pgs. for this to proceed smoothly, new pgs are initially
81 * colocated with their parents; that is, pgp_num doesn't increase
82 * until the new pgs have successfully split. only _then_ are the new
83 * pgs placed independently.
84 *
85 * lpg_num -- localized pg count (per device). replicas are randomly
86 * selected.
87 *
88 * lpgp_num -- as above.
89 */
90#define CEPH_PG_TYPE_REP 1
91#define CEPH_PG_TYPE_RAID4 2
92#define CEPH_PG_POOL_VERSION 2
93struct ceph_pg_pool {
94 __u8 type; /* CEPH_PG_TYPE_* */
95 __u8 size; /* number of osds in each pg */
96 __u8 crush_ruleset; /* crush placement rule */
97 __u8 object_hash; /* hash mapping object name to ps */
98 __le32 pg_num, pgp_num; /* number of pg's */
99 __le32 lpg_num, lpgp_num; /* number of localized pg's */
100 __le32 last_change; /* most recent epoch changed */
101 __le64 snap_seq; /* seq for per-pool snapshot */
102 __le32 snap_epoch; /* epoch of last snap */
103 __le32 num_snaps;
104 __le32 num_removed_snap_intervals; /* if non-empty, NO per-pool snaps */
105 __le64 auid; /* who owns the pg */
106} __attribute__ ((packed));
107
108/*
109 * stable_mod func is used to control the number of placement groups.
110 * similar to straight-up modulo, but produces a stable mapping as b
111 * increases over time. b is the number of bins, and bmask is the
112 * containing power of 2 minus 1.
113 *
114 * b <= bmask and bmask=(2**n)-1
115 * e.g., b=12 -> bmask=15, b=123 -> bmask=127
116 */
117static inline int ceph_stable_mod(int x, int b, int bmask)
118{
119 if ((x & bmask) < b)
120 return x & bmask;
121 else
122 return x & (bmask >> 1);
123}
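
Worked values for b = 12 (so bmask = 15) make the stability property concrete:

	ceph_stable_mod( 5, 12, 15);  /*  5 & 15 =  5, 5 < 12   -> bin 5 */
	ceph_stable_mod(13, 12, 15);  /* 13 & 15 = 13, 13 >= 12 -> 13 & 7 = bin 5 */
	ceph_stable_mod(28, 12, 15);  /* 28 & 15 = 12, 12 >= 12 -> 12 & 7 = bin 4 */

Growing b from 12 to 13 remaps only the inputs with (x & 15) == 12; every other input keeps its bin, which is what lets pg counts grow without reshuffling all existing data.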
124
125/*
126 * object layout - how a given object should be stored.
127 */
128struct ceph_object_layout {
129 struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */
130 __le32 ol_stripe_unit; /* for per-object parity, if any */
131} __attribute__ ((packed));
132
133/*
134 * compound epoch+version, used by storage layer to serialize mutations
135 */
136struct ceph_eversion {
137 __le32 epoch;
138 __le64 version;
139} __attribute__ ((packed));
140
141/*
142 * osd map bits
143 */
144
145/* status bits */
146#define CEPH_OSD_EXISTS 1
147#define CEPH_OSD_UP 2
148
149/* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */
150#define CEPH_OSD_IN 0x10000
151#define CEPH_OSD_OUT 0
152
153
154/*
155 * osd map flag bits
156 */
157#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
158#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
159#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
160#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
161#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
162
163/*
164 * osd ops
165 */
166#define CEPH_OSD_OP_MODE 0xf000
167#define CEPH_OSD_OP_MODE_RD 0x1000
168#define CEPH_OSD_OP_MODE_WR 0x2000
169#define CEPH_OSD_OP_MODE_RMW 0x3000
170#define CEPH_OSD_OP_MODE_SUB 0x4000
171
172#define CEPH_OSD_OP_TYPE 0x0f00
173#define CEPH_OSD_OP_TYPE_LOCK 0x0100
174#define CEPH_OSD_OP_TYPE_DATA 0x0200
175#define CEPH_OSD_OP_TYPE_ATTR 0x0300
176#define CEPH_OSD_OP_TYPE_EXEC 0x0400
177#define CEPH_OSD_OP_TYPE_PG 0x0500
178
179enum {
180 /** data **/
181 /* read */
182 CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1,
183 CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2,
184
185 /* fancy read */
186 CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4,
187
188 /* write */
189 CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1,
190 CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2,
191 CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3,
192 CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4,
193 CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5,
194
195 /* fancy write */
196 CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6,
197 CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7,
198 CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8,
199 CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9,
200
201 CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10,
202 CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11,
203 CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12,
204
205 CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13,
206 CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14,
207
208 /** attrs **/
209 /* read */
210 CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1,
211 CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2,
212 CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3,
213
214 /* write */
215 CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1,
216 CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2,
217 CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3,
218 CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4,
219
220 /** subop **/
221 CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1,
222 CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2,
223 CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3,
224 CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4,
225 CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5,
226
227 /** lock **/
228 CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1,
229 CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2,
230 CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3,
231 CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4,
232 CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5,
233 CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6,
234
235 /** exec **/
236 CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1,
237
238 /** pg **/
239 CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1,
240};
241
242static inline int ceph_osd_op_type_lock(int op)
243{
244 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK;
245}
246static inline int ceph_osd_op_type_data(int op)
247{
248 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA;
249}
250static inline int ceph_osd_op_type_attr(int op)
251{
252 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR;
253}
254static inline int ceph_osd_op_type_exec(int op)
255{
256 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC;
257}
258static inline int ceph_osd_op_type_pg(int op)
259{
260 return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG;
261}
262
263static inline int ceph_osd_op_mode_subop(int op)
264{
265 return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB;
266}
267static inline int ceph_osd_op_mode_read(int op)
268{
269 return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD;
270}
271static inline int ceph_osd_op_mode_modify(int op)
272{
273 return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR;
274}
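
For example, CEPH_OSD_OP_WRITE composes as 0x2000 | 0x0200 | 1 = 0x2201, and the helpers above pick it apart accordingly:

	ceph_osd_op_mode_modify(CEPH_OSD_OP_WRITE); /* 0x2201 & 0xf000 == 0x2000: true */
	ceph_osd_op_type_data(CEPH_OSD_OP_WRITE);   /* 0x2201 & 0x0f00 == 0x0200: true */
	ceph_osd_op_type_attr(CEPH_OSD_OP_WRITE);   /* 0x0200 != 0x0300: false */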
275
276/*
277 * note that the following tmap stuff is also defined in the ceph librados.h
278 * any modification here needs to be updated there
279 */
280#define CEPH_OSD_TMAP_HDR 'h'
281#define CEPH_OSD_TMAP_SET 's'
282#define CEPH_OSD_TMAP_RM 'r'
283
284extern const char *ceph_osd_op_name(int op);
285
286
287/*
288 * osd op flags
289 *
290 * An op may be READ, WRITE, or READ|WRITE.
291 */
292enum {
293 CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */
294 CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */
295 CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */
296 CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */
297 CEPH_OSD_FLAG_READ = 16, /* op may read */
298 CEPH_OSD_FLAG_WRITE = 32, /* op may write */
299 CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */
300 CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */
301 CEPH_OSD_FLAG_BALANCE_READS = 256,
302 CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */
303 CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */
304 CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */
305 CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */
306};
307
308enum {
309 CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */
310};
311
312#define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc */
313#define EBLACKLISTED ESHUTDOWN /* blacklisted */
314
315/* xattr comparison */
316enum {
317 CEPH_OSD_CMPXATTR_OP_NOP = 0,
318 CEPH_OSD_CMPXATTR_OP_EQ = 1,
319 CEPH_OSD_CMPXATTR_OP_NE = 2,
320 CEPH_OSD_CMPXATTR_OP_GT = 3,
321 CEPH_OSD_CMPXATTR_OP_GTE = 4,
322 CEPH_OSD_CMPXATTR_OP_LT = 5,
323 CEPH_OSD_CMPXATTR_OP_LTE = 6
324};
325
326enum {
327 CEPH_OSD_CMPXATTR_MODE_STRING = 1,
328 CEPH_OSD_CMPXATTR_MODE_U64 = 2
329};
330
331/*
332 * an individual object operation. each may be accompanied by some data
333 * payload
334 */
335struct ceph_osd_op {
336 __le16 op; /* CEPH_OSD_OP_* */
337 __le32 flags; /* CEPH_OSD_FLAG_* */
338 union {
339 struct {
340 __le64 offset, length;
341 __le64 truncate_size;
342 __le32 truncate_seq;
343 } __attribute__ ((packed)) extent;
344 struct {
345 __le32 name_len;
346 __le32 value_len;
347 __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
348 __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
349 } __attribute__ ((packed)) xattr;
350 struct {
351 __u8 class_len;
352 __u8 method_len;
353 __u8 argc;
354 __le32 indata_len;
355 } __attribute__ ((packed)) cls;
356 struct {
357 __le64 cookie, count;
358 } __attribute__ ((packed)) pgls;
359 struct {
360 __le64 snapid;
361 } __attribute__ ((packed)) snap;
362 };
363 __le32 payload_len;
364} __attribute__ ((packed));
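
As an illustration (a sketch only; the osd client is what actually builds these), a 4 KB read at offset 0 fills one op like so:

	struct ceph_osd_op op = {
		.op = cpu_to_le16(CEPH_OSD_OP_READ),
		.extent = {
			.offset = cpu_to_le64(0),
			.length = cpu_to_le64(4096),
		},
	};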
365
366/*
367 * osd request message header. each request may include multiple
368 * ceph_osd_op object operations.
369 */
370struct ceph_osd_request_head {
371 __le32 client_inc; /* client incarnation */
372 struct ceph_object_layout layout; /* pgid */
373 __le32 osdmap_epoch; /* client's osdmap epoch */
374
375 __le32 flags;
376
377 struct ceph_timespec mtime; /* for mutations only */
378 struct ceph_eversion reassert_version; /* if we are replaying op */
379
380 __le32 object_len; /* length of object name */
381
382 __le64 snapid; /* snapid to read */
383 __le64 snap_seq; /* writer's snap context */
384 __le32 num_snaps;
385
386 __le16 num_ops;
387 struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */
388} __attribute__ ((packed));
389
390struct ceph_osd_reply_head {
391 __le32 client_inc; /* client incarnation */
392 __le32 flags;
393 struct ceph_object_layout layout;
394 __le32 osdmap_epoch;
395 struct ceph_eversion reassert_version; /* for replaying uncommitted */
396
397 __le32 result; /* result code */
398
399 __le32 object_len; /* length of object name */
400 __le32 num_ops;
401 struct ceph_osd_op ops[0]; /* ops[], object */
402} __attribute__ ((packed));
403
404
405#endif
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 190b6c4a6f2b..39c243acd062 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1,10 +1,12 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2 2
3#include <linux/sort.h> 3#include <linux/sort.h>
4#include <linux/slab.h> 4#include <linux/slab.h>
5 5
6#include "super.h" 6#include "super.h"
7#include "decode.h" 7#include "mds_client.h"
8
9#include <linux/ceph/decode.h>
8 10
9/* 11/*
10 * Snapshots in ceph are driven in large part by cooperation from the 12 * Snapshots in ceph are driven in large part by cooperation from the
@@ -526,7 +528,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
526 struct ceph_cap_snap *capsnap) 528 struct ceph_cap_snap *capsnap)
527{ 529{
528 struct inode *inode = &ci->vfs_inode; 530 struct inode *inode = &ci->vfs_inode;
529 struct ceph_mds_client *mdsc = &ceph_sb_to_client(inode->i_sb)->mdsc; 531 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
530 532
531 BUG_ON(capsnap->writing); 533 BUG_ON(capsnap->writing);
532 capsnap->size = inode->i_size; 534 capsnap->size = inode->i_size;
@@ -747,7 +749,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
747 struct ceph_mds_session *session, 749 struct ceph_mds_session *session,
748 struct ceph_msg *msg) 750 struct ceph_msg *msg)
749{ 751{
750 struct super_block *sb = mdsc->client->sb; 752 struct super_block *sb = mdsc->fsc->sb;
751 int mds = session->s_mds; 753 int mds = session->s_mds;
752 u64 split; 754 u64 split;
753 int op; 755 int op;
diff --git a/fs/ceph/ceph_strings.c b/fs/ceph/strings.c
index c6179d3a26a2..cd5097d7c804 100644
--- a/fs/ceph/ceph_strings.c
+++ b/fs/ceph/strings.c
@@ -1,71 +1,9 @@
1/* 1/*
2 * Ceph string constants 2 * Ceph fs string constants
3 */ 3 */
4#include "types.h" 4#include <linux/module.h>
5#include <linux/ceph/types.h>
5 6
6const char *ceph_entity_type_name(int type)
7{
8 switch (type) {
9 case CEPH_ENTITY_TYPE_MDS: return "mds";
10 case CEPH_ENTITY_TYPE_OSD: return "osd";
11 case CEPH_ENTITY_TYPE_MON: return "mon";
12 case CEPH_ENTITY_TYPE_CLIENT: return "client";
13 case CEPH_ENTITY_TYPE_AUTH: return "auth";
14 default: return "unknown";
15 }
16}
17
18const char *ceph_osd_op_name(int op)
19{
20 switch (op) {
21 case CEPH_OSD_OP_READ: return "read";
22 case CEPH_OSD_OP_STAT: return "stat";
23
24 case CEPH_OSD_OP_MASKTRUNC: return "masktrunc";
25
26 case CEPH_OSD_OP_WRITE: return "write";
27 case CEPH_OSD_OP_DELETE: return "delete";
28 case CEPH_OSD_OP_TRUNCATE: return "truncate";
29 case CEPH_OSD_OP_ZERO: return "zero";
30 case CEPH_OSD_OP_WRITEFULL: return "writefull";
31 case CEPH_OSD_OP_ROLLBACK: return "rollback";
32
33 case CEPH_OSD_OP_APPEND: return "append";
34 case CEPH_OSD_OP_STARTSYNC: return "startsync";
35 case CEPH_OSD_OP_SETTRUNC: return "settrunc";
36 case CEPH_OSD_OP_TRIMTRUNC: return "trimtrunc";
37
38 case CEPH_OSD_OP_TMAPUP: return "tmapup";
39 case CEPH_OSD_OP_TMAPGET: return "tmapget";
40 case CEPH_OSD_OP_TMAPPUT: return "tmapput";
41
42 case CEPH_OSD_OP_GETXATTR: return "getxattr";
43 case CEPH_OSD_OP_GETXATTRS: return "getxattrs";
44 case CEPH_OSD_OP_SETXATTR: return "setxattr";
45 case CEPH_OSD_OP_SETXATTRS: return "setxattrs";
46 case CEPH_OSD_OP_RESETXATTRS: return "resetxattrs";
47 case CEPH_OSD_OP_RMXATTR: return "rmxattr";
48 case CEPH_OSD_OP_CMPXATTR: return "cmpxattr";
49
50 case CEPH_OSD_OP_PULL: return "pull";
51 case CEPH_OSD_OP_PUSH: return "push";
52 case CEPH_OSD_OP_BALANCEREADS: return "balance-reads";
53 case CEPH_OSD_OP_UNBALANCEREADS: return "unbalance-reads";
54 case CEPH_OSD_OP_SCRUB: return "scrub";
55
56 case CEPH_OSD_OP_WRLOCK: return "wrlock";
57 case CEPH_OSD_OP_WRUNLOCK: return "wrunlock";
58 case CEPH_OSD_OP_RDLOCK: return "rdlock";
59 case CEPH_OSD_OP_RDUNLOCK: return "rdunlock";
60 case CEPH_OSD_OP_UPLOCK: return "uplock";
61 case CEPH_OSD_OP_DNLOCK: return "dnlock";
62
63 case CEPH_OSD_OP_CALL: return "call";
64
65 case CEPH_OSD_OP_PGLS: return "pgls";
66 }
67 return "???";
68}
69 7
70const char *ceph_mds_state_name(int s) 8const char *ceph_mds_state_name(int s)
71{ 9{
@@ -177,17 +115,3 @@ const char *ceph_snap_op_name(int o)
177 } 115 }
178 return "???"; 116 return "???";
179} 117}
180
181const char *ceph_pool_op_name(int op)
182{
183 switch (op) {
184 case POOL_OP_CREATE: return "create";
185 case POOL_OP_DELETE: return "delete";
186 case POOL_OP_AUID_CHANGE: return "auid change";
187 case POOL_OP_CREATE_SNAP: return "create snap";
188 case POOL_OP_DELETE_SNAP: return "delete snap";
189 case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
190 case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
191 }
192 return "???";
193}
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 9922628532b2..d6e0e0421891 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -1,5 +1,5 @@
1 1
2#include "ceph_debug.h" 2#include <linux/ceph/ceph_debug.h>
3 3
4#include <linux/backing-dev.h> 4#include <linux/backing-dev.h>
5#include <linux/ctype.h> 5#include <linux/ctype.h>
@@ -15,10 +15,13 @@
15#include <linux/statfs.h> 15#include <linux/statfs.h>
16#include <linux/string.h> 16#include <linux/string.h>
17 17
18#include "decode.h"
19#include "super.h" 18#include "super.h"
20#include "mon_client.h" 19#include "mds_client.h"
21#include "auth.h" 20
21#include <linux/ceph/decode.h>
22#include <linux/ceph/mon_client.h>
23#include <linux/ceph/auth.h>
24#include <linux/ceph/debugfs.h>
22 25
23/* 26/*
24 * Ceph superblock operations 27 * Ceph superblock operations
@@ -26,36 +29,22 @@
26 * Handle the basics of mounting, unmounting. 29 * Handle the basics of mounting, unmounting.
27 */ 30 */
28 31
29
30/*
31 * find filename portion of a path (/foo/bar/baz -> baz)
32 */
33const char *ceph_file_part(const char *s, int len)
34{
35 const char *e = s + len;
36
37 while (e != s && *(e-1) != '/')
38 e--;
39 return e;
40}
41
42
43/* 32/*
44 * super ops 33 * super ops
45 */ 34 */
46static void ceph_put_super(struct super_block *s) 35static void ceph_put_super(struct super_block *s)
47{ 36{
48 struct ceph_client *client = ceph_sb_to_client(s); 37 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
49 38
50 dout("put_super\n"); 39 dout("put_super\n");
51 ceph_mdsc_close_sessions(&client->mdsc); 40 ceph_mdsc_close_sessions(fsc->mdsc);
52 41
53 /* 42 /*
54 * ensure we release the bdi before put_anon_super releases 43 * ensure we release the bdi before put_anon_super releases
55 * the device name. 44 * the device name.
56 */ 45 */
57 if (s->s_bdi == &client->backing_dev_info) { 46 if (s->s_bdi == &fsc->backing_dev_info) {
58 bdi_unregister(&client->backing_dev_info); 47 bdi_unregister(&fsc->backing_dev_info);
59 s->s_bdi = NULL; 48 s->s_bdi = NULL;
60 } 49 }
61 50
@@ -64,14 +53,14 @@ static void ceph_put_super(struct super_block *s)
64 53
65static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) 54static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
66{ 55{
67 struct ceph_client *client = ceph_inode_to_client(dentry->d_inode); 56 struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
68 struct ceph_monmap *monmap = client->monc.monmap; 57 struct ceph_monmap *monmap = fsc->client->monc.monmap;
69 struct ceph_statfs st; 58 struct ceph_statfs st;
70 u64 fsid; 59 u64 fsid;
71 int err; 60 int err;
72 61
73 dout("statfs\n"); 62 dout("statfs\n");
74 err = ceph_monc_do_statfs(&client->monc, &st); 63 err = ceph_monc_do_statfs(&fsc->client->monc, &st);
75 if (err < 0) 64 if (err < 0)
76 return err; 65 return err;
77 66
@@ -104,238 +93,28 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
104 93
105static int ceph_sync_fs(struct super_block *sb, int wait) 94static int ceph_sync_fs(struct super_block *sb, int wait)
106{ 95{
107 struct ceph_client *client = ceph_sb_to_client(sb); 96 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
108 97
109 if (!wait) { 98 if (!wait) {
110 dout("sync_fs (non-blocking)\n"); 99 dout("sync_fs (non-blocking)\n");
111 ceph_flush_dirty_caps(&client->mdsc); 100 ceph_flush_dirty_caps(fsc->mdsc);
112 dout("sync_fs (non-blocking) done\n"); 101 dout("sync_fs (non-blocking) done\n");
113 return 0; 102 return 0;
114 } 103 }
115 104
116 dout("sync_fs (blocking)\n"); 105 dout("sync_fs (blocking)\n");
117 ceph_osdc_sync(&ceph_sb_to_client(sb)->osdc); 106 ceph_osdc_sync(&fsc->client->osdc);
118 ceph_mdsc_sync(&ceph_sb_to_client(sb)->mdsc); 107 ceph_mdsc_sync(fsc->mdsc);
119 dout("sync_fs (blocking) done\n"); 108 dout("sync_fs (blocking) done\n");
120 return 0; 109 return 0;
121} 110}
122 111
123static int default_congestion_kb(void)
124{
125 int congestion_kb;
126
127 /*
128 * Copied from NFS
129 *
130 * congestion size, scale with available memory.
131 *
132 * 64MB: 8192k
133 * 128MB: 11585k
134 * 256MB: 16384k
135 * 512MB: 23170k
136 * 1GB: 32768k
137 * 2GB: 46340k
138 * 4GB: 65536k
139 * 8GB: 92681k
140 * 16GB: 131072k
141 *
142 * This allows larger machines to have larger/more transfers.
143 * Limit the default to 256M
144 */
145 congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
146 if (congestion_kb > 256*1024)
147 congestion_kb = 256*1024;
148
149 return congestion_kb;
150}
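
Checking the 1 GB row above: with 4 KB pages, totalram_pages = 262144, int_sqrt(262144) = 512, and (16 * 512) << (PAGE_SHIFT - 10) = 8192 << 2 = 32768k, which matches the table.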
151
152/**
153 * ceph_show_options - Show mount options in /proc/mounts
154 * @m: seq_file to write to
155 * @mnt: mount descriptor
156 */
157static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
158{
159 struct ceph_client *client = ceph_sb_to_client(mnt->mnt_sb);
160 struct ceph_mount_args *args = client->mount_args;
161
162 if (args->flags & CEPH_OPT_FSID)
163 seq_printf(m, ",fsid=%pU", &args->fsid);
164 if (args->flags & CEPH_OPT_NOSHARE)
165 seq_puts(m, ",noshare");
166 if (args->flags & CEPH_OPT_DIRSTAT)
167 seq_puts(m, ",dirstat");
168 if ((args->flags & CEPH_OPT_RBYTES) == 0)
169 seq_puts(m, ",norbytes");
170 if (args->flags & CEPH_OPT_NOCRC)
171 seq_puts(m, ",nocrc");
172 if (args->flags & CEPH_OPT_NOASYNCREADDIR)
173 seq_puts(m, ",noasyncreaddir");
174
175 if (args->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
176 seq_printf(m, ",mount_timeout=%d", args->mount_timeout);
177 if (args->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
178 seq_printf(m, ",osd_idle_ttl=%d", args->osd_idle_ttl);
179 if (args->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
180 seq_printf(m, ",osdtimeout=%d", args->osd_timeout);
181 if (args->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
182 seq_printf(m, ",osdkeepalivetimeout=%d",
183 args->osd_keepalive_timeout);
184 if (args->wsize)
185 seq_printf(m, ",wsize=%d", args->wsize);
186 if (args->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
187 seq_printf(m, ",rsize=%d", args->rsize);
188 if (args->congestion_kb != default_congestion_kb())
189 seq_printf(m, ",write_congestion_kb=%d", args->congestion_kb);
190 if (args->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
191 seq_printf(m, ",caps_wanted_delay_min=%d",
192 args->caps_wanted_delay_min);
193 if (args->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
194 seq_printf(m, ",caps_wanted_delay_max=%d",
195 args->caps_wanted_delay_max);
196 if (args->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
197 seq_printf(m, ",cap_release_safety=%d",
198 args->cap_release_safety);
199 if (args->max_readdir != CEPH_MAX_READDIR_DEFAULT)
200 seq_printf(m, ",readdir_max_entries=%d", args->max_readdir);
201 if (args->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
202 seq_printf(m, ",readdir_max_bytes=%d", args->max_readdir_bytes);
203 if (strcmp(args->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
204 seq_printf(m, ",snapdirname=%s", args->snapdir_name);
205 if (args->name)
206 seq_printf(m, ",name=%s", args->name);
207 if (args->secret)
208 seq_puts(m, ",secret=<hidden>");
209 return 0;
210}
211
212/*
213 * caches
214 */
215struct kmem_cache *ceph_inode_cachep;
216struct kmem_cache *ceph_cap_cachep;
217struct kmem_cache *ceph_dentry_cachep;
218struct kmem_cache *ceph_file_cachep;
219
220static void ceph_inode_init_once(void *foo)
221{
222 struct ceph_inode_info *ci = foo;
223 inode_init_once(&ci->vfs_inode);
224}
225
226static int __init init_caches(void)
227{
228 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
229 sizeof(struct ceph_inode_info),
230 __alignof__(struct ceph_inode_info),
231 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
232 ceph_inode_init_once);
233 if (ceph_inode_cachep == NULL)
234 return -ENOMEM;
235
236 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
237 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
238 if (ceph_cap_cachep == NULL)
239 goto bad_cap;
240
241 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
242 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
243 if (ceph_dentry_cachep == NULL)
244 goto bad_dentry;
245
246 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
247 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
248 if (ceph_file_cachep == NULL)
249 goto bad_file;
250
251 return 0;
252
253bad_file:
254 kmem_cache_destroy(ceph_dentry_cachep);
255bad_dentry:
256 kmem_cache_destroy(ceph_cap_cachep);
257bad_cap:
258 kmem_cache_destroy(ceph_inode_cachep);
259 return -ENOMEM;
260}
261
262static void destroy_caches(void)
263{
264 kmem_cache_destroy(ceph_inode_cachep);
265 kmem_cache_destroy(ceph_cap_cachep);
266 kmem_cache_destroy(ceph_dentry_cachep);
267 kmem_cache_destroy(ceph_file_cachep);
268}
269
270
271/*
272 * ceph_umount_begin - initiate forced umount. Tear down the
273 * mount, skipping steps that may hang while waiting for server(s).
274 */
275static void ceph_umount_begin(struct super_block *sb)
276{
277 struct ceph_client *client = ceph_sb_to_client(sb);
278
279 dout("ceph_umount_begin - starting forced umount\n");
280 if (!client)
281 return;
282 client->mount_state = CEPH_MOUNT_SHUTDOWN;
283 return;
284}
285
286static const struct super_operations ceph_super_ops = {
287 .alloc_inode = ceph_alloc_inode,
288 .destroy_inode = ceph_destroy_inode,
289 .write_inode = ceph_write_inode,
290 .sync_fs = ceph_sync_fs,
291 .put_super = ceph_put_super,
292 .show_options = ceph_show_options,
293 .statfs = ceph_statfs,
294 .umount_begin = ceph_umount_begin,
295};
296
297
298const char *ceph_msg_type_name(int type)
299{
300 switch (type) {
301 case CEPH_MSG_SHUTDOWN: return "shutdown";
302 case CEPH_MSG_PING: return "ping";
303 case CEPH_MSG_AUTH: return "auth";
304 case CEPH_MSG_AUTH_REPLY: return "auth_reply";
305 case CEPH_MSG_MON_MAP: return "mon_map";
306 case CEPH_MSG_MON_GET_MAP: return "mon_get_map";
307 case CEPH_MSG_MON_SUBSCRIBE: return "mon_subscribe";
308 case CEPH_MSG_MON_SUBSCRIBE_ACK: return "mon_subscribe_ack";
309 case CEPH_MSG_STATFS: return "statfs";
310 case CEPH_MSG_STATFS_REPLY: return "statfs_reply";
311 case CEPH_MSG_MDS_MAP: return "mds_map";
312 case CEPH_MSG_CLIENT_SESSION: return "client_session";
313 case CEPH_MSG_CLIENT_RECONNECT: return "client_reconnect";
314 case CEPH_MSG_CLIENT_REQUEST: return "client_request";
315 case CEPH_MSG_CLIENT_REQUEST_FORWARD: return "client_request_forward";
316 case CEPH_MSG_CLIENT_REPLY: return "client_reply";
317 case CEPH_MSG_CLIENT_CAPS: return "client_caps";
318 case CEPH_MSG_CLIENT_CAPRELEASE: return "client_cap_release";
319 case CEPH_MSG_CLIENT_SNAP: return "client_snap";
320 case CEPH_MSG_CLIENT_LEASE: return "client_lease";
321 case CEPH_MSG_OSD_MAP: return "osd_map";
322 case CEPH_MSG_OSD_OP: return "osd_op";
323 case CEPH_MSG_OSD_OPREPLY: return "osd_opreply";
324 default: return "unknown";
325 }
326}
327
328
329/* 112/*
330 * mount options 113 * mount options
331 */ 114 */
332enum { 115enum {
333 Opt_wsize, 116 Opt_wsize,
334 Opt_rsize, 117 Opt_rsize,
335 Opt_osdtimeout,
336 Opt_osdkeepalivetimeout,
337 Opt_mount_timeout,
338 Opt_osd_idle_ttl,
339 Opt_caps_wanted_delay_min, 118 Opt_caps_wanted_delay_min,
340 Opt_caps_wanted_delay_max, 119 Opt_caps_wanted_delay_max,
341 Opt_cap_release_safety, 120 Opt_cap_release_safety,
@@ -344,29 +123,19 @@ enum {
344 Opt_congestion_kb, 123 Opt_congestion_kb,
345 Opt_last_int, 124 Opt_last_int,
346 /* int args above */ 125 /* int args above */
347 Opt_fsid,
348 Opt_snapdirname, 126 Opt_snapdirname,
349 Opt_name,
350 Opt_secret,
351 Opt_last_string, 127 Opt_last_string,
352 /* string args above */ 128 /* string args above */
353 Opt_ip,
354 Opt_noshare,
355 Opt_dirstat, 129 Opt_dirstat,
356 Opt_nodirstat, 130 Opt_nodirstat,
357 Opt_rbytes, 131 Opt_rbytes,
358 Opt_norbytes, 132 Opt_norbytes,
359 Opt_nocrc,
360 Opt_noasyncreaddir, 133 Opt_noasyncreaddir,
361}; 134};
362 135
363static match_table_t arg_tokens = { 136static match_table_t fsopt_tokens = {
364 {Opt_wsize, "wsize=%d"}, 137 {Opt_wsize, "wsize=%d"},
365 {Opt_rsize, "rsize=%d"}, 138 {Opt_rsize, "rsize=%d"},
366 {Opt_osdtimeout, "osdtimeout=%d"},
367 {Opt_osdkeepalivetimeout, "osdkeepalive=%d"},
368 {Opt_mount_timeout, "mount_timeout=%d"},
369 {Opt_osd_idle_ttl, "osd_idle_ttl=%d"},
370 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"}, 139 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
371 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"}, 140 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
372 {Opt_cap_release_safety, "cap_release_safety=%d"}, 141 {Opt_cap_release_safety, "cap_release_safety=%d"},
@@ -374,403 +143,459 @@ static match_table_t arg_tokens = {
374 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"}, 143 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
375 {Opt_congestion_kb, "write_congestion_kb=%d"}, 144 {Opt_congestion_kb, "write_congestion_kb=%d"},
376 /* int args above */ 145 /* int args above */
377 {Opt_fsid, "fsid=%s"},
378 {Opt_snapdirname, "snapdirname=%s"}, 146 {Opt_snapdirname, "snapdirname=%s"},
379 {Opt_name, "name=%s"},
380 {Opt_secret, "secret=%s"},
381 /* string args above */ 147 /* string args above */
382 {Opt_ip, "ip=%s"},
383 {Opt_noshare, "noshare"},
384 {Opt_dirstat, "dirstat"}, 148 {Opt_dirstat, "dirstat"},
385 {Opt_nodirstat, "nodirstat"}, 149 {Opt_nodirstat, "nodirstat"},
386 {Opt_rbytes, "rbytes"}, 150 {Opt_rbytes, "rbytes"},
387 {Opt_norbytes, "norbytes"}, 151 {Opt_norbytes, "norbytes"},
388 {Opt_nocrc, "nocrc"},
389 {Opt_noasyncreaddir, "noasyncreaddir"}, 152 {Opt_noasyncreaddir, "noasyncreaddir"},
390 {-1, NULL} 153 {-1, NULL}
391}; 154};
392 155
393static int parse_fsid(const char *str, struct ceph_fsid *fsid) 156static int parse_fsopt_token(char *c, void *private)
394{ 157{
395 int i = 0; 158 struct ceph_mount_options *fsopt = private;
396 char tmp[3]; 159 substring_t argstr[MAX_OPT_ARGS];
397 int err = -EINVAL; 160 int token, intval, ret;
398 int d; 161
399 162 token = match_token((char *)c, fsopt_tokens, argstr);
400 dout("parse_fsid '%s'\n", str); 163 if (token < 0)
401 tmp[2] = 0; 164 return -EINVAL;
402 while (*str && i < 16) { 165
403 if (ispunct(*str)) { 166 if (token < Opt_last_int) {
404 str++; 167 ret = match_int(&argstr[0], &intval);
405 continue; 168 if (ret < 0) {
169 pr_err("bad mount option arg (not int) "
170 "at '%s'\n", c);
171 return ret;
406 } 172 }
407 if (!isxdigit(str[0]) || !isxdigit(str[1])) 173 dout("got int token %d val %d\n", token, intval);
408 break; 174 } else if (token > Opt_last_int && token < Opt_last_string) {
409 tmp[0] = str[0]; 175 dout("got string token %d val %s\n", token,
410 tmp[1] = str[1]; 176 argstr[0].from);
411 if (sscanf(tmp, "%x", &d) < 1) 177 } else {
412 break; 178 dout("got token %d\n", token);
413 fsid->fsid[i] = d & 0xff;
414 i++;
415 str += 2;
416 } 179 }
417 180
418 if (i == 16) 181 switch (token) {
419 err = 0; 182 case Opt_snapdirname:
420 dout("parse_fsid ret %d got fsid %pU", err, fsid); 183 kfree(fsopt->snapdir_name);
421 return err; 184 fsopt->snapdir_name = kstrndup(argstr[0].from,
185 argstr[0].to-argstr[0].from,
186 GFP_KERNEL);
187 if (!fsopt->snapdir_name)
188 return -ENOMEM;
189 break;
190
191 /* misc */
192 case Opt_wsize:
193 fsopt->wsize = intval;
194 break;
195 case Opt_rsize:
196 fsopt->rsize = intval;
197 break;
198 case Opt_caps_wanted_delay_min:
199 fsopt->caps_wanted_delay_min = intval;
200 break;
201 case Opt_caps_wanted_delay_max:
202 fsopt->caps_wanted_delay_max = intval;
203 break;
204 case Opt_readdir_max_entries:
205 fsopt->max_readdir = intval;
206 break;
207 case Opt_readdir_max_bytes:
208 fsopt->max_readdir_bytes = intval;
209 break;
210 case Opt_congestion_kb:
211 fsopt->congestion_kb = intval;
212 break;
213 case Opt_dirstat:
214 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
215 break;
216 case Opt_nodirstat:
217 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
218 break;
219 case Opt_rbytes:
220 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
221 break;
222 case Opt_norbytes:
223 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
224 break;
225 case Opt_noasyncreaddir:
226 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
227 break;
228 default:
229 BUG_ON(token);
230 }
231 return 0;
422} 232}
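
The token-range trick in parse_fsopt_token() — integer-valued options enumerated below Opt_last_int, string-valued ones between Opt_last_int and Opt_last_string — lets a single range check pick the argument parser before the per-option switch runs. Below is a minimal userspace sketch of the same idea, using plain strncmp()/strtol() in place of the kernel's match_token()/match_int(); the names and table are illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum { Opt_wsize, Opt_rsize, Opt_last_int,       /* int args above */
       Opt_snapdirname, Opt_last_string,         /* string args above */
       Opt_dirstat, Opt_bad };

static const struct { int token; const char *pattern; } tokens[] = {
	{ Opt_wsize,       "wsize=" },
	{ Opt_rsize,       "rsize=" },
	{ Opt_snapdirname, "snapdirname=" },
	{ Opt_dirstat,     "dirstat" },
};

/* crude stand-in for match_token(): returns the token and, for "name="
 * patterns, points *arg at the argument text */
static int match(const char *opt, const char **arg)
{
	size_t i, n;

	*arg = NULL;
	for (i = 0; i < sizeof(tokens) / sizeof(tokens[0]); i++) {
		n = strlen(tokens[i].pattern);
		if (tokens[i].pattern[n - 1] == '=') {
			if (strncmp(opt, tokens[i].pattern, n) == 0) {
				*arg = opt + n;
				return tokens[i].token;
			}
		} else if (strcmp(opt, tokens[i].pattern) == 0) {
			return tokens[i].token;
		}
	}
	return Opt_bad;
}

int main(void)
{
	const char *arg;
	int token = match("rsize=524288", &arg);

	if (token == Opt_bad)
		fprintf(stderr, "bad mount option\n");
	else if (token < Opt_last_int)          /* parse argument as integer */
		printf("got int token %d val %ld\n", token,
		       strtol(arg, NULL, 10));
	else if (token < Opt_last_string)       /* keep argument as string */
		printf("got string token %d val %s\n", token, arg);
	else                                    /* bare flag, no argument */
		printf("got token %d\n", token);
	return 0;
}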
423 233
424static struct ceph_mount_args *parse_mount_args(int flags, char *options, 234static void destroy_mount_options(struct ceph_mount_options *args)
425 const char *dev_name,
426 const char **path)
427{ 235{
428 struct ceph_mount_args *args; 236 dout("destroy_mount_options %p\n", args);
429 const char *c; 237 kfree(args->snapdir_name);
430 int err = -ENOMEM; 238 kfree(args);
431 substring_t argstr[MAX_OPT_ARGS]; 239}
432 240
433 args = kzalloc(sizeof(*args), GFP_KERNEL); 241static int strcmp_null(const char *s1, const char *s2)
434 if (!args) 242{
435 return ERR_PTR(-ENOMEM); 243 if (!s1 && !s2)
436 args->mon_addr = kcalloc(CEPH_MAX_MON, sizeof(*args->mon_addr), 244 return 0;
437 GFP_KERNEL); 245 if (s1 && !s2)
438 if (!args->mon_addr) 246 return -1;
439 goto out; 247 if (!s1 && s2)
248 return 1;
249 return strcmp(s1, s2);
250}
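
strcmp_null() exists because either snapdir_name may legitimately be NULL at comparison time. A quick standalone check of its contract — two NULLs compare equal, a NULL orders consistently against any real string, and two real strings fall through to plain strcmp():

#include <assert.h>
#include <stddef.h>
#include <string.h>

static int strcmp_null(const char *s1, const char *s2)
{
	if (!s1 && !s2)
		return 0;
	if (s1 && !s2)
		return -1;
	if (!s1 && s2)
		return 1;
	return strcmp(s1, s2);
}

int main(void)
{
	assert(strcmp_null(NULL, NULL) == 0);
	assert(strcmp_null(".snap", NULL) < 0);
	assert(strcmp_null(NULL, ".snap") > 0);
	assert(strcmp_null(".snap", ".snap") == 0);
	return 0;
}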
440 251
441 dout("parse_mount_args %p, dev_name '%s'\n", args, dev_name); 252static int compare_mount_options(struct ceph_mount_options *new_fsopt,
442 253 struct ceph_options *new_opt,
443 /* start with defaults */ 254 struct ceph_fs_client *fsc)
444 args->sb_flags = flags; 255{
445 args->flags = CEPH_OPT_DEFAULT; 256 struct ceph_mount_options *fsopt1 = new_fsopt;
446 args->osd_timeout = CEPH_OSD_TIMEOUT_DEFAULT; 257 struct ceph_mount_options *fsopt2 = fsc->mount_options;
447 args->osd_keepalive_timeout = CEPH_OSD_KEEPALIVE_DEFAULT; 258 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
448 args->mount_timeout = CEPH_MOUNT_TIMEOUT_DEFAULT; /* seconds */ 259 int ret;
449 args->osd_idle_ttl = CEPH_OSD_IDLE_TTL_DEFAULT; /* seconds */
450 args->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
451 args->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
452 args->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
453 args->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
454 args->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
455 args->max_readdir = CEPH_MAX_READDIR_DEFAULT;
456 args->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
457 args->congestion_kb = default_congestion_kb();
458
459 /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
460 err = -EINVAL;
461 if (!dev_name)
462 goto out;
463 *path = strstr(dev_name, ":/");
464 if (*path == NULL) {
465 pr_err("device name is missing path (no :/ in %s)\n",
466 dev_name);
467 goto out;
468 }
469 260
470 /* get mon ip(s) */ 261 ret = memcmp(fsopt1, fsopt2, ofs);
471 err = ceph_parse_ips(dev_name, *path, args->mon_addr, 262 if (ret)
472 CEPH_MAX_MON, &args->num_mon); 263 return ret;
473 if (err < 0) 264
474 goto out; 265 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
266 if (ret)
267 return ret;
268
269 return ceph_compare_options(new_opt, fsc->client);
270}
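
compare_mount_options() leans on struct layout: every flat, memcmp-safe field of ceph_mount_options is placed ahead of snapdir_name, so offsetof() gives the length of a prefix that a single memcmp() can cover, and only the pointer member needs a bespoke comparison. A self-contained sketch of the trick, with an illustrative two-field struct:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct opts {
	int flags;               /* flat fields first: safe to memcmp */
	int rsize;
	char *snapdir_name;      /* pointers from here on: compare by hand */
};

static int opts_equal(const struct opts *a, const struct opts *b)
{
	size_t flat = offsetof(struct opts, snapdir_name);

	if (memcmp(a, b, flat) != 0)
		return 0;
	return strcmp(a->snapdir_name, b->snapdir_name) == 0;
}

int main(void)
{
	struct opts a = { 1, 512 * 1024, ".snap" };
	struct opts b = { 1, 512 * 1024, ".snap" };

	printf("equal: %d\n", opts_equal(&a, &b));   /* prints: equal: 1 */
	return 0;
}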
271
272static int parse_mount_options(struct ceph_mount_options **pfsopt,
273 struct ceph_options **popt,
274 int flags, char *options,
275 const char *dev_name,
276 const char **path)
277{
278 struct ceph_mount_options *fsopt;
279 const char *dev_name_end;
280 int err = -ENOMEM;
281
282 fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
283 if (!fsopt)
284 return -ENOMEM;
285
286 dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
287
288 fsopt->sb_flags = flags;
289 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
290
291 fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
292 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
293 fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
294 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
295 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
296 fsopt->congestion_kb = default_congestion_kb();
297
298 /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
299 err = -EINVAL;
300 if (!dev_name)
301 goto out;
302 *path = strstr(dev_name, ":/");
303 if (*path == NULL) {
304 pr_err("device name is missing path (no :/ in %s)\n",
305 dev_name);
306 goto out;
307 }
308 dev_name_end = *path;
309 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
475 310
476 /* path on server */ 311 /* path on server */
477 *path += 2; 312 *path += 2;
478 dout("server path '%s'\n", *path); 313 dout("server path '%s'\n", *path);
479 314
480 /* parse mount options */ 315 err = ceph_parse_options(popt, options, dev_name, dev_name_end,
481 while ((c = strsep(&options, ",")) != NULL) { 316 parse_fsopt_token, (void *)fsopt);
482 int token, intval, ret; 317 if (err)
483 if (!*c) 318 goto out;
484 continue; 319
485 err = -EINVAL; 320 /* success */
486 token = match_token((char *)c, arg_tokens, argstr); 321 *pfsopt = fsopt;
487 if (token < 0) { 322 return 0;
488 pr_err("bad mount option at '%s'\n", c);
489 goto out;
490 }
491 if (token < Opt_last_int) {
492 ret = match_int(&argstr[0], &intval);
493 if (ret < 0) {
494 pr_err("bad mount option arg (not int) "
495 "at '%s'\n", c);
496 continue;
497 }
498 dout("got int token %d val %d\n", token, intval);
499 } else if (token > Opt_last_int && token < Opt_last_string) {
500 dout("got string token %d val %s\n", token,
501 argstr[0].from);
502 } else {
503 dout("got token %d\n", token);
504 }
505 switch (token) {
506 case Opt_ip:
507 err = ceph_parse_ips(argstr[0].from,
508 argstr[0].to,
509 &args->my_addr,
510 1, NULL);
511 if (err < 0)
512 goto out;
513 args->flags |= CEPH_OPT_MYIP;
514 break;
515
516 case Opt_fsid:
517 err = parse_fsid(argstr[0].from, &args->fsid);
518 if (err == 0)
519 args->flags |= CEPH_OPT_FSID;
520 break;
521 case Opt_snapdirname:
522 kfree(args->snapdir_name);
523 args->snapdir_name = kstrndup(argstr[0].from,
524 argstr[0].to-argstr[0].from,
525 GFP_KERNEL);
526 break;
527 case Opt_name:
528 args->name = kstrndup(argstr[0].from,
529 argstr[0].to-argstr[0].from,
530 GFP_KERNEL);
531 break;
532 case Opt_secret:
533 args->secret = kstrndup(argstr[0].from,
534 argstr[0].to-argstr[0].from,
535 GFP_KERNEL);
536 break;
537
538 /* misc */
539 case Opt_wsize:
540 args->wsize = intval;
541 break;
542 case Opt_rsize:
543 args->rsize = intval;
544 break;
545 case Opt_osdtimeout:
546 args->osd_timeout = intval;
547 break;
548 case Opt_osdkeepalivetimeout:
549 args->osd_keepalive_timeout = intval;
550 break;
551 case Opt_osd_idle_ttl:
552 args->osd_idle_ttl = intval;
553 break;
554 case Opt_mount_timeout:
555 args->mount_timeout = intval;
556 break;
557 case Opt_caps_wanted_delay_min:
558 args->caps_wanted_delay_min = intval;
559 break;
560 case Opt_caps_wanted_delay_max:
561 args->caps_wanted_delay_max = intval;
562 break;
563 case Opt_readdir_max_entries:
564 args->max_readdir = intval;
565 break;
566 case Opt_readdir_max_bytes:
567 args->max_readdir_bytes = intval;
568 break;
569 case Opt_congestion_kb:
570 args->congestion_kb = intval;
571 break;
572
573 case Opt_noshare:
574 args->flags |= CEPH_OPT_NOSHARE;
575 break;
576
577 case Opt_dirstat:
578 args->flags |= CEPH_OPT_DIRSTAT;
579 break;
580 case Opt_nodirstat:
581 args->flags &= ~CEPH_OPT_DIRSTAT;
582 break;
583 case Opt_rbytes:
584 args->flags |= CEPH_OPT_RBYTES;
585 break;
586 case Opt_norbytes:
587 args->flags &= ~CEPH_OPT_RBYTES;
588 break;
589 case Opt_nocrc:
590 args->flags |= CEPH_OPT_NOCRC;
591 break;
592 case Opt_noasyncreaddir:
593 args->flags |= CEPH_OPT_NOASYNCREADDIR;
594 break;
595
596 default:
597 BUG_ON(token);
598 }
599 }
600 return args;
601 323
602out: 324out:
603 kfree(args->mon_addr); 325 destroy_mount_options(fsopt);
604 kfree(args); 326 return err;
605 return ERR_PTR(err);
606} 327}
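
The device-name split above is a single strstr(): everything before the first ":/" is the monitor list handed down to libceph, and the path sent to the MDS starts just past it. A userspace sketch of the same split:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *dev_name = "1.2.3.4:6789,1.2.3.5:6789:/subdir/in/fs";
	const char *path = strstr(dev_name, ":/");

	if (path == NULL) {
		fprintf(stderr, "device name is missing path (no :/ in %s)\n",
			dev_name);
		return 1;
	}
	printf("monitors '%.*s'\n", (int)(path - dev_name), dev_name);
	printf("server path '%s'\n", path + 2);   /* skip ":/", as above */
	return 0;
}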
607 328
608static void destroy_mount_args(struct ceph_mount_args *args) 329/**
330 * ceph_show_options - Show mount options in /proc/mounts
331 * @m: seq_file to write to
332 * @mnt: mount descriptor
333 */
334static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
609{ 335{
610 dout("destroy_mount_args %p\n", args); 336 struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
611 kfree(args->snapdir_name); 337 struct ceph_mount_options *fsopt = fsc->mount_options;
612 args->snapdir_name = NULL; 338 struct ceph_options *opt = fsc->client->options;
613 kfree(args->name); 339
614 args->name = NULL; 340 if (opt->flags & CEPH_OPT_FSID)
615 kfree(args->secret); 341 seq_printf(m, ",fsid=%pU", &opt->fsid);
616 args->secret = NULL; 342 if (opt->flags & CEPH_OPT_NOSHARE)
617 kfree(args); 343 seq_puts(m, ",noshare");
344 if (opt->flags & CEPH_OPT_NOCRC)
345 seq_puts(m, ",nocrc");
346
347 if (opt->name)
348 seq_printf(m, ",name=%s", opt->name);
349 if (opt->secret)
350 seq_puts(m, ",secret=<hidden>");
351
352 if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
353 seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
354 if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
355 seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
356 if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
357 seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
358 if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
359 seq_printf(m, ",osdkeepalivetimeout=%d",
360 opt->osd_keepalive_timeout);
361
362 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
363 seq_puts(m, ",dirstat");
364 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
365 seq_puts(m, ",norbytes");
366 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
367 seq_puts(m, ",noasyncreaddir");
368
369 if (fsopt->wsize)
370 seq_printf(m, ",wsize=%d", fsopt->wsize);
371 if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
372 seq_printf(m, ",rsize=%d", fsopt->rsize);
373 if (fsopt->congestion_kb != default_congestion_kb())
374 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
375 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
376 seq_printf(m, ",caps_wanted_delay_min=%d",
377 fsopt->caps_wanted_delay_min);
378 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
379 seq_printf(m, ",caps_wanted_delay_max=%d",
380 fsopt->caps_wanted_delay_max);
381 if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
382 seq_printf(m, ",cap_release_safety=%d",
383 fsopt->cap_release_safety);
384 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
385 seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
386 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
387 seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
388 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
389 seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
390 return 0;
618} 391}
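
Note the pattern in ceph_show_options(): an option is emitted only when it differs from its default, so the /proc/mounts line stays short and can be fed straight back to mount(8). A tiny illustration of that discipline (option names and defaults here are made up):

#include <stdio.h>

#define RSIZE_DEFAULT (512 * 1024)

static void show_options(FILE *m, int rsize, int dirstat)
{
	if (rsize != RSIZE_DEFAULT)
		fprintf(m, ",rsize=%d", rsize);
	if (dirstat)                     /* default is off */
		fputs(",dirstat", m);
}

int main(void)
{
	show_options(stdout, RSIZE_DEFAULT, 1);   /* prints only ",dirstat" */
	putchar('\n');
	return 0;
}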
619 392
620/* 393/*
621 * create a fresh client instance 394 * handle any mon messages the standard library doesn't understand.
395 * return an error if we don't understand it either.
622 */ 396 */
623static struct ceph_client *ceph_create_client(struct ceph_mount_args *args) 397static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
624{ 398{
625 struct ceph_client *client; 399 struct ceph_fs_client *fsc = client->private;
400 int type = le16_to_cpu(msg->hdr.type);
401
402 switch (type) {
403 case CEPH_MSG_MDS_MAP:
404 ceph_mdsc_handle_map(fsc->mdsc, msg);
405 return 0;
406
407 default:
408 return -1;
409 }
410}
411
412/*
413 * create a new fs client
414 */
415struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
416 struct ceph_options *opt)
417{
418 struct ceph_fs_client *fsc;
626 int err = -ENOMEM; 419 int err = -ENOMEM;
627 420
628 client = kzalloc(sizeof(*client), GFP_KERNEL); 421 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
629 if (client == NULL) 422 if (!fsc)
630 return ERR_PTR(-ENOMEM); 423 return ERR_PTR(-ENOMEM);
631 424
632 mutex_init(&client->mount_mutex); 425 fsc->client = ceph_create_client(opt, fsc);
633 426 if (IS_ERR(fsc->client)) {
634 init_waitqueue_head(&client->auth_wq); 427 err = PTR_ERR(fsc->client);
428 goto fail;
429 }
430 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
431 fsc->client->supported_features |= CEPH_FEATURE_FLOCK;
432 fsc->client->monc.want_mdsmap = 1;
635 433
636 client->sb = NULL; 434 fsc->mount_options = fsopt;
637 client->mount_state = CEPH_MOUNT_MOUNTING;
638 client->mount_args = args;
639 435
640 client->msgr = NULL; 436 fsc->sb = NULL;
437 fsc->mount_state = CEPH_MOUNT_MOUNTING;
641 438
642 client->auth_err = 0; 439 atomic_long_set(&fsc->writeback_count, 0);
643 atomic_long_set(&client->writeback_count, 0);
644 440
645 err = bdi_init(&client->backing_dev_info); 441 err = bdi_init(&fsc->backing_dev_info);
646 if (err < 0) 442 if (err < 0)
647 goto fail; 443 goto fail_client;
648 444
649 err = -ENOMEM; 445 err = -ENOMEM;
650 client->wb_wq = create_workqueue("ceph-writeback"); 446 fsc->wb_wq = create_workqueue("ceph-writeback");
651 if (client->wb_wq == NULL) 447 if (fsc->wb_wq == NULL)
652 goto fail_bdi; 448 goto fail_bdi;
653 client->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid"); 449 fsc->pg_inv_wq = create_singlethread_workqueue("ceph-pg-invalid");
654 if (client->pg_inv_wq == NULL) 450 if (fsc->pg_inv_wq == NULL)
655 goto fail_wb_wq; 451 goto fail_wb_wq;
656 client->trunc_wq = create_singlethread_workqueue("ceph-trunc"); 452 fsc->trunc_wq = create_singlethread_workqueue("ceph-trunc");
657 if (client->trunc_wq == NULL) 453 if (fsc->trunc_wq == NULL)
658 goto fail_pg_inv_wq; 454 goto fail_pg_inv_wq;
659 455
660 /* set up mempools */ 456 /* set up mempools */
661 err = -ENOMEM; 457 err = -ENOMEM;
662 client->wb_pagevec_pool = mempool_create_kmalloc_pool(10, 458 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
663 client->mount_args->wsize >> PAGE_CACHE_SHIFT); 459 fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
664 if (!client->wb_pagevec_pool) 460 if (!fsc->wb_pagevec_pool)
665 goto fail_trunc_wq; 461 goto fail_trunc_wq;
666 462
667 /* caps */ 463 /* caps */
668 client->min_caps = args->max_readdir; 464 fsc->min_caps = fsopt->max_readdir;
465
466 return fsc;
669 467
670 /* subsystems */
671 err = ceph_monc_init(&client->monc, client);
672 if (err < 0)
673 goto fail_mempool;
674 err = ceph_osdc_init(&client->osdc, client);
675 if (err < 0)
676 goto fail_monc;
677 err = ceph_mdsc_init(&client->mdsc, client);
678 if (err < 0)
679 goto fail_osdc;
680 return client;
681
682fail_osdc:
683 ceph_osdc_stop(&client->osdc);
684fail_monc:
685 ceph_monc_stop(&client->monc);
686fail_mempool:
687 mempool_destroy(client->wb_pagevec_pool);
688fail_trunc_wq: 468fail_trunc_wq:
689 destroy_workqueue(client->trunc_wq); 469 destroy_workqueue(fsc->trunc_wq);
690fail_pg_inv_wq: 470fail_pg_inv_wq:
691 destroy_workqueue(client->pg_inv_wq); 471 destroy_workqueue(fsc->pg_inv_wq);
692fail_wb_wq: 472fail_wb_wq:
693 destroy_workqueue(client->wb_wq); 473 destroy_workqueue(fsc->wb_wq);
694fail_bdi: 474fail_bdi:
695 bdi_destroy(&client->backing_dev_info); 475 bdi_destroy(&fsc->backing_dev_info);
476fail_client:
477 ceph_destroy_client(fsc->client);
696fail: 478fail:
697 kfree(client); 479 kfree(fsc);
698 return ERR_PTR(err); 480 return ERR_PTR(err);
699} 481}
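
The error path in create_fs_client() is the usual kernel fail_* ladder: each setup step that can fail jumps to a label that unwinds everything acquired so far, in strict reverse order of acquisition. A self-contained userspace sketch of the same unwinding discipline:

#include <stdio.h>
#include <stdlib.h>

struct thing { char *a, *b, *c; };

static struct thing *create_thing(void)
{
	struct thing *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	t->a = malloc(16);
	if (!t->a)
		goto fail;
	t->b = malloc(16);
	if (!t->b)
		goto fail_a;
	t->c = malloc(16);
	if (!t->c)
		goto fail_b;
	return t;                 /* success: everything initialized */

fail_b:                           /* unwind in reverse order of setup */
	free(t->b);
fail_a:
	free(t->a);
fail:
	free(t);
	return NULL;
}

int main(void)
{
	struct thing *t = create_thing();

	printf("create %s\n", t ? "ok" : "failed");
	if (t) {                  /* normal teardown mirrors the unwind */
		free(t->c);
		free(t->b);
		free(t->a);
		free(t);
	}
	return 0;
}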
700 482
701static void ceph_destroy_client(struct ceph_client *client) 483void destroy_fs_client(struct ceph_fs_client *fsc)
702{ 484{
703 dout("destroy_client %p\n", client); 485 dout("destroy_fs_client %p\n", fsc);
704 486
705 /* unmount */ 487 destroy_workqueue(fsc->wb_wq);
706 ceph_mdsc_stop(&client->mdsc); 488 destroy_workqueue(fsc->pg_inv_wq);
707 ceph_osdc_stop(&client->osdc); 489 destroy_workqueue(fsc->trunc_wq);
708 490
709 /* 491 bdi_destroy(&fsc->backing_dev_info);
710 * make sure mds and osd connections close out before destroying
711 * the auth module, which is needed to free those connections'
712 * ceph_authorizers.
713 */
714 ceph_msgr_flush();
715
716 ceph_monc_stop(&client->monc);
717 492
718 ceph_debugfs_client_cleanup(client); 493 mempool_destroy(fsc->wb_pagevec_pool);
719 destroy_workqueue(client->wb_wq);
720 destroy_workqueue(client->pg_inv_wq);
721 destroy_workqueue(client->trunc_wq);
722 494
723 bdi_destroy(&client->backing_dev_info); 495 destroy_mount_options(fsc->mount_options);
724 496
725 if (client->msgr) 497 ceph_fs_debugfs_cleanup(fsc);
726 ceph_messenger_destroy(client->msgr);
727 mempool_destroy(client->wb_pagevec_pool);
728 498
729 destroy_mount_args(client->mount_args); 499 ceph_destroy_client(fsc->client);
730 500
731 kfree(client); 501 kfree(fsc);
732 dout("destroy_client %p done\n", client); 502 dout("destroy_fs_client %p done\n", fsc);
733} 503}
734 504
735/* 505/*
736 * Initially learn our fsid, or verify an fsid matches. 506 * caches
737 */ 507 */
738int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) 508struct kmem_cache *ceph_inode_cachep;
509struct kmem_cache *ceph_cap_cachep;
510struct kmem_cache *ceph_dentry_cachep;
511struct kmem_cache *ceph_file_cachep;
512
513static void ceph_inode_init_once(void *foo)
739{ 514{
740 if (client->have_fsid) { 515 struct ceph_inode_info *ci = foo;
741 if (ceph_fsid_compare(&client->fsid, fsid)) { 516 inode_init_once(&ci->vfs_inode);
742 pr_err("bad fsid, had %pU got %pU", 517}
743 &client->fsid, fsid); 518
744 return -1; 519static int __init init_caches(void)
745 } 520{
746 } else { 521 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
747 pr_info("client%lld fsid %pU\n", client->monc.auth->global_id, 522 sizeof(struct ceph_inode_info),
748 fsid); 523 __alignof__(struct ceph_inode_info),
749 memcpy(&client->fsid, fsid, sizeof(*fsid)); 524 (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
750 ceph_debugfs_client_init(client); 525 ceph_inode_init_once);
751 client->have_fsid = true; 526 if (ceph_inode_cachep == NULL)
752 } 527 return -ENOMEM;
528
529 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
530 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
531 if (ceph_cap_cachep == NULL)
532 goto bad_cap;
533
534 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
535 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
536 if (ceph_dentry_cachep == NULL)
537 goto bad_dentry;
538
539 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
540 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
541 if (ceph_file_cachep == NULL)
542 goto bad_file;
543
753 return 0; 544 return 0;
545
546bad_file:
547 kmem_cache_destroy(ceph_dentry_cachep);
548bad_dentry:
549 kmem_cache_destroy(ceph_cap_cachep);
550bad_cap:
551 kmem_cache_destroy(ceph_inode_cachep);
552 return -ENOMEM;
754} 553}
755 554
555static void destroy_caches(void)
556{
557 kmem_cache_destroy(ceph_inode_cachep);
558 kmem_cache_destroy(ceph_cap_cachep);
559 kmem_cache_destroy(ceph_dentry_cachep);
560 kmem_cache_destroy(ceph_file_cachep);
561}
562
563
756/* 564/*
757 * true if we have the mon map (and have thus joined the cluster) 565 * ceph_umount_begin - initiate forced umount. Tear down the
566 * mount, skipping steps that may hang while waiting for server(s).
758 */ 567 */
759static int have_mon_and_osd_map(struct ceph_client *client) 568static void ceph_umount_begin(struct super_block *sb)
760{ 569{
761 return client->monc.monmap && client->monc.monmap->epoch && 570 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
762 client->osdc.osdmap && client->osdc.osdmap->epoch; 571
572 dout("ceph_umount_begin - starting forced umount\n");
573 if (!fsc)
574 return;
575 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
576 return;
763} 577}
764 578
579static const struct super_operations ceph_super_ops = {
580 .alloc_inode = ceph_alloc_inode,
581 .destroy_inode = ceph_destroy_inode,
582 .write_inode = ceph_write_inode,
583 .sync_fs = ceph_sync_fs,
584 .put_super = ceph_put_super,
585 .show_options = ceph_show_options,
586 .statfs = ceph_statfs,
587 .umount_begin = ceph_umount_begin,
588};
589
765/* 590/*
766 * Bootstrap mount by opening the root directory. Note the mount 591 * Bootstrap mount by opening the root directory. Note the mount
767 * @started time from caller, and time out if this takes too long. 592 * @started time from caller, and time out if this takes too long.
768 */ 593 */
769static struct dentry *open_root_dentry(struct ceph_client *client, 594static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
770 const char *path, 595 const char *path,
771 unsigned long started) 596 unsigned long started)
772{ 597{
773 struct ceph_mds_client *mdsc = &client->mdsc; 598 struct ceph_mds_client *mdsc = fsc->mdsc;
774 struct ceph_mds_request *req = NULL; 599 struct ceph_mds_request *req = NULL;
775 int err; 600 int err;
776 struct dentry *root; 601 struct dentry *root;
@@ -784,14 +609,14 @@ static struct dentry *open_root_dentry(struct ceph_client *client,
784 req->r_ino1.ino = CEPH_INO_ROOT; 609 req->r_ino1.ino = CEPH_INO_ROOT;
785 req->r_ino1.snap = CEPH_NOSNAP; 610 req->r_ino1.snap = CEPH_NOSNAP;
786 req->r_started = started; 611 req->r_started = started;
787 req->r_timeout = client->mount_args->mount_timeout * HZ; 612 req->r_timeout = fsc->client->options->mount_timeout * HZ;
788 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); 613 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
789 req->r_num_caps = 2; 614 req->r_num_caps = 2;
790 err = ceph_mdsc_do_request(mdsc, NULL, req); 615 err = ceph_mdsc_do_request(mdsc, NULL, req);
791 if (err == 0) { 616 if (err == 0) {
792 dout("open_root_inode success\n"); 617 dout("open_root_inode success\n");
793 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT && 618 if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
794 client->sb->s_root == NULL) 619 fsc->sb->s_root == NULL)
795 root = d_alloc_root(req->r_target_inode); 620 root = d_alloc_root(req->r_target_inode);
796 else 621 else
797 root = d_obtain_alias(req->r_target_inode); 622 root = d_obtain_alias(req->r_target_inode);
@@ -804,105 +629,86 @@ static struct dentry *open_root_dentry(struct ceph_client *client,
804 return root; 629 return root;
805} 630}
806 631
632
633
634
807/* 635/*
808 * mount: join the ceph cluster, and open root directory. 636 * mount: join the ceph cluster, and open root directory.
809 */ 637 */
810static int ceph_mount(struct ceph_client *client, struct vfsmount *mnt, 638static int ceph_mount(struct ceph_fs_client *fsc, struct vfsmount *mnt,
811 const char *path) 639 const char *path)
812{ 640{
813 struct ceph_entity_addr *myaddr = NULL;
814 int err; 641 int err;
815 unsigned long timeout = client->mount_args->mount_timeout * HZ;
816 unsigned long started = jiffies; /* note the start time */ 642 unsigned long started = jiffies; /* note the start time */
817 struct dentry *root; 643 struct dentry *root;
644 int first = 0; /* first vfsmount for this super_block */
818 645
819 dout("mount start\n"); 646 dout("mount start\n");
820 mutex_lock(&client->mount_mutex); 647 mutex_lock(&fsc->client->mount_mutex);
821
822 /* initialize the messenger */
823 if (client->msgr == NULL) {
824 if (ceph_test_opt(client, MYIP))
825 myaddr = &client->mount_args->my_addr;
826 client->msgr = ceph_messenger_create(myaddr);
827 if (IS_ERR(client->msgr)) {
828 err = PTR_ERR(client->msgr);
829 client->msgr = NULL;
830 goto out;
831 }
832 client->msgr->nocrc = ceph_test_opt(client, NOCRC);
833 }
834 648
835 /* open session, and wait for mon, mds, and osd maps */ 649 err = __ceph_open_session(fsc->client, started);
836 err = ceph_monc_open_session(&client->monc);
837 if (err < 0) 650 if (err < 0)
838 goto out; 651 goto out;
839 652
840 while (!have_mon_and_osd_map(client)) {
841 err = -EIO;
842 if (timeout && time_after_eq(jiffies, started + timeout))
843 goto out;
844
845 /* wait */
846 dout("mount waiting for mon_map\n");
847 err = wait_event_interruptible_timeout(client->auth_wq,
848 have_mon_and_osd_map(client) || (client->auth_err < 0),
849 timeout);
850 if (err == -EINTR || err == -ERESTARTSYS)
851 goto out;
852 if (client->auth_err < 0) {
853 err = client->auth_err;
854 goto out;
855 }
856 }
857
858 dout("mount opening root\n"); 653 dout("mount opening root\n");
859 root = open_root_dentry(client, "", started); 654 root = open_root_dentry(fsc, "", started);
860 if (IS_ERR(root)) { 655 if (IS_ERR(root)) {
861 err = PTR_ERR(root); 656 err = PTR_ERR(root);
862 goto out; 657 goto out;
863 } 658 }
864 if (client->sb->s_root) 659 if (fsc->sb->s_root) {
865 dput(root); 660 dput(root);
866 else 661 } else {
867 client->sb->s_root = root; 662 fsc->sb->s_root = root;
663 first = 1;
664
665 err = ceph_fs_debugfs_init(fsc);
666 if (err < 0)
667 goto fail;
668 }
868 669
869 if (path[0] == 0) { 670 if (path[0] == 0) {
870 dget(root); 671 dget(root);
871 } else { 672 } else {
872 dout("mount opening base mountpoint\n"); 673 dout("mount opening base mountpoint\n");
873 root = open_root_dentry(client, path, started); 674 root = open_root_dentry(fsc, path, started);
874 if (IS_ERR(root)) { 675 if (IS_ERR(root)) {
875 err = PTR_ERR(root); 676 err = PTR_ERR(root);
876 dput(client->sb->s_root); 677 goto fail;
877 client->sb->s_root = NULL;
878 goto out;
879 } 678 }
880 } 679 }
881 680
882 mnt->mnt_root = root; 681 mnt->mnt_root = root;
883 mnt->mnt_sb = client->sb; 682 mnt->mnt_sb = fsc->sb;
884 683
885 client->mount_state = CEPH_MOUNT_MOUNTED; 684 fsc->mount_state = CEPH_MOUNT_MOUNTED;
886 dout("mount success\n"); 685 dout("mount success\n");
887 err = 0; 686 err = 0;
888 687
889out: 688out:
890 mutex_unlock(&client->mount_mutex); 689 mutex_unlock(&fsc->client->mount_mutex);
891 return err; 690 return err;
691
692fail:
693 if (first) {
694 dput(fsc->sb->s_root);
695 fsc->sb->s_root = NULL;
696 }
697 goto out;
892} 698}
893 699
894static int ceph_set_super(struct super_block *s, void *data) 700static int ceph_set_super(struct super_block *s, void *data)
895{ 701{
896 struct ceph_client *client = data; 702 struct ceph_fs_client *fsc = data;
897 int ret; 703 int ret;
898 704
899 dout("set_super %p data %p\n", s, data); 705 dout("set_super %p data %p\n", s, data);
900 706
901 s->s_flags = client->mount_args->sb_flags; 707 s->s_flags = fsc->mount_options->sb_flags;
902 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */ 708 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
903 709
904 s->s_fs_info = client; 710 s->s_fs_info = fsc;
905 client->sb = s; 711 fsc->sb = s;
906 712
907 s->s_op = &ceph_super_ops; 713 s->s_op = &ceph_super_ops;
908 s->s_export_op = &ceph_export_ops; 714 s->s_export_op = &ceph_export_ops;
@@ -917,7 +723,7 @@ static int ceph_set_super(struct super_block *s, void *data)
917 723
918fail: 724fail:
919 s->s_fs_info = NULL; 725 s->s_fs_info = NULL;
920 client->sb = NULL; 726 fsc->sb = NULL;
921 return ret; 727 return ret;
922} 728}
923 729
@@ -926,30 +732,23 @@ fail:
926 */ 732 */
927static int ceph_compare_super(struct super_block *sb, void *data) 733static int ceph_compare_super(struct super_block *sb, void *data)
928{ 734{
929 struct ceph_client *new = data; 735 struct ceph_fs_client *new = data;
930 struct ceph_mount_args *args = new->mount_args; 736 struct ceph_mount_options *fsopt = new->mount_options;
931 struct ceph_client *other = ceph_sb_to_client(sb); 737 struct ceph_options *opt = new->client->options;
932 int i; 738 struct ceph_fs_client *other = ceph_sb_to_client(sb);
933 739
934 dout("ceph_compare_super %p\n", sb); 740 dout("ceph_compare_super %p\n", sb);
935 if (args->flags & CEPH_OPT_FSID) { 741
936 if (ceph_fsid_compare(&args->fsid, &other->fsid)) { 742 if (compare_mount_options(fsopt, opt, other)) {
937 dout("fsid doesn't match\n"); 743 dout("monitor(s)/mount options don't match\n");
938 return 0; 744 return 0;
939 }
940 } else {
941 /* do we share (a) monitor? */
942 for (i = 0; i < new->monc.monmap->num_mon; i++)
943 if (ceph_monmap_contains(other->monc.monmap,
944 &new->monc.monmap->mon_inst[i].addr))
945 break;
946 if (i == new->monc.monmap->num_mon) {
947 dout("mon ip not part of monmap\n");
948 return 0;
949 }
950 dout("mon ip matches existing sb %p\n", sb);
951 } 745 }
952 if (args->sb_flags != other->mount_args->sb_flags) { 746 if ((opt->flags & CEPH_OPT_FSID) &&
747 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
748 dout("fsid doesn't match\n");
749 return 0;
750 }
751 if (fsopt->sb_flags != other->mount_options->sb_flags) {
953 dout("flags differ\n"); 752 dout("flags differ\n");
954 return 0; 753 return 0;
955 } 754 }
@@ -961,19 +760,20 @@ static int ceph_compare_super(struct super_block *sb, void *data)
961 */ 760 */
962static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); 761static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
963 762
964static int ceph_register_bdi(struct super_block *sb, struct ceph_client *client) 763static int ceph_register_bdi(struct super_block *sb,
764 struct ceph_fs_client *fsc)
965{ 765{
966 int err; 766 int err;
967 767
968 /* set ra_pages based on rsize mount option? */ 768 /* set ra_pages based on rsize mount option? */
969 if (client->mount_args->rsize >= PAGE_CACHE_SIZE) 769 if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
970 client->backing_dev_info.ra_pages = 770 fsc->backing_dev_info.ra_pages =
971 (client->mount_args->rsize + PAGE_CACHE_SIZE - 1) 771 (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
972 >> PAGE_SHIFT; 772 >> PAGE_SHIFT;
973 err = bdi_register(&client->backing_dev_info, NULL, "ceph-%d", 773 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
974 atomic_long_inc_return(&bdi_seq)); 774 atomic_long_inc_return(&bdi_seq));
975 if (!err) 775 if (!err)
976 sb->s_bdi = &client->backing_dev_info; 776 sb->s_bdi = &fsc->backing_dev_info;
977 return err; 777 return err;
978} 778}
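
The ra_pages computation above is a plain round-up division from bytes of requested readahead to whole pages. For example, assuming 4k pages:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096, page_shift = 12;
	unsigned long rsize = 514 * 1024;   /* deliberately not page aligned */
	unsigned long ra_pages = (rsize + page_size - 1) >> page_shift;

	printf("rsize=%lu -> ra_pages=%lu\n", rsize, ra_pages);  /* 129 */
	return 0;
}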
979 779
@@ -982,46 +782,52 @@ static int ceph_get_sb(struct file_system_type *fs_type,
982 struct vfsmount *mnt) 782 struct vfsmount *mnt)
983{ 783{
984 struct super_block *sb; 784 struct super_block *sb;
985 struct ceph_client *client; 785 struct ceph_fs_client *fsc;
986 int err; 786 int err;
987 int (*compare_super)(struct super_block *, void *) = ceph_compare_super; 787 int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
988 const char *path = NULL; 788 const char *path = NULL;
989 struct ceph_mount_args *args; 789 struct ceph_mount_options *fsopt = NULL;
790 struct ceph_options *opt = NULL;
990 791
991 dout("ceph_get_sb\n"); 792 dout("ceph_get_sb\n");
992 args = parse_mount_args(flags, data, dev_name, &path); 793 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
993 if (IS_ERR(args)) { 794 if (err < 0)
994 err = PTR_ERR(args);
995 goto out_final; 795 goto out_final;
996 }
997 796
998 /* create client (which we may/may not use) */ 797 /* create client (which we may/may not use) */
999 client = ceph_create_client(args); 798 fsc = create_fs_client(fsopt, opt);
1000 if (IS_ERR(client)) { 799 if (IS_ERR(fsc)) {
1001 err = PTR_ERR(client); 800 err = PTR_ERR(fsc);
801 kfree(fsopt);
802 kfree(opt);
1002 goto out_final; 803 goto out_final;
1003 } 804 }
1004 805
1005 if (client->mount_args->flags & CEPH_OPT_NOSHARE) 806 err = ceph_mdsc_init(fsc);
807 if (err < 0)
808 goto out;
809
810 if (ceph_test_opt(fsc->client, NOSHARE))
1006 compare_super = NULL; 811 compare_super = NULL;
1007 sb = sget(fs_type, compare_super, ceph_set_super, client); 812 sb = sget(fs_type, compare_super, ceph_set_super, fsc);
1008 if (IS_ERR(sb)) { 813 if (IS_ERR(sb)) {
1009 err = PTR_ERR(sb); 814 err = PTR_ERR(sb);
1010 goto out; 815 goto out;
1011 } 816 }
1012 817
1013 if (ceph_sb_to_client(sb) != client) { 818 if (ceph_sb_to_client(sb) != fsc) {
1014 ceph_destroy_client(client); 819 ceph_mdsc_destroy(fsc);
1015 client = ceph_sb_to_client(sb); 820 destroy_fs_client(fsc);
1016 dout("get_sb got existing client %p\n", client); 821 fsc = ceph_sb_to_client(sb);
822 dout("get_sb got existing client %p\n", fsc);
1017 } else { 823 } else {
1018 dout("get_sb using new client %p\n", client); 824 dout("get_sb using new client %p\n", fsc);
1019 err = ceph_register_bdi(sb, client); 825 err = ceph_register_bdi(sb, fsc);
1020 if (err < 0) 826 if (err < 0)
1021 goto out_splat; 827 goto out_splat;
1022 } 828 }
1023 829
1024 err = ceph_mount(client, mnt, path); 830 err = ceph_mount(fsc, mnt, path);
1025 if (err < 0) 831 if (err < 0)
1026 goto out_splat; 832 goto out_splat;
1027 dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root, 833 dout("root %p inode %p ino %llx.%llx\n", mnt->mnt_root,
@@ -1029,12 +835,13 @@ static int ceph_get_sb(struct file_system_type *fs_type,
1029 return 0; 835 return 0;
1030 836
1031out_splat: 837out_splat:
1032 ceph_mdsc_close_sessions(&client->mdsc); 838 ceph_mdsc_close_sessions(fsc->mdsc);
1033 deactivate_locked_super(sb); 839 deactivate_locked_super(sb);
1034 goto out_final; 840 goto out_final;
1035 841
1036out: 842out:
1037 ceph_destroy_client(client); 843 ceph_mdsc_destroy(fsc);
844 destroy_fs_client(fsc);
1038out_final: 845out_final:
1039 dout("ceph_get_sb fail %d\n", err); 846 dout("ceph_get_sb fail %d\n", err);
1040 return err; 847 return err;
@@ -1042,11 +849,12 @@ out_final:
1042 849
1043static void ceph_kill_sb(struct super_block *s) 850static void ceph_kill_sb(struct super_block *s)
1044{ 851{
1045 struct ceph_client *client = ceph_sb_to_client(s); 852 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1046 dout("kill_sb %p\n", s); 853 dout("kill_sb %p\n", s);
1047 ceph_mdsc_pre_umount(&client->mdsc); 854 ceph_mdsc_pre_umount(fsc->mdsc);
1048 kill_anon_super(s); /* will call put_super after sb is r/o */ 855 kill_anon_super(s); /* will call put_super after sb is r/o */
1049 ceph_destroy_client(client); 856 ceph_mdsc_destroy(fsc);
857 destroy_fs_client(fsc);
1050} 858}
1051 859
1052static struct file_system_type ceph_fs_type = { 860static struct file_system_type ceph_fs_type = {
@@ -1062,36 +870,20 @@ static struct file_system_type ceph_fs_type = {
1062 870
1063static int __init init_ceph(void) 871static int __init init_ceph(void)
1064{ 872{
1065 int ret = 0; 873 int ret = init_caches();
1066
1067 ret = ceph_debugfs_init();
1068 if (ret < 0)
1069 goto out;
1070
1071 ret = ceph_msgr_init();
1072 if (ret < 0)
1073 goto out_debugfs;
1074
1075 ret = init_caches();
1076 if (ret) 874 if (ret)
1077 goto out_msgr; 875 goto out;
1078 876
1079 ret = register_filesystem(&ceph_fs_type); 877 ret = register_filesystem(&ceph_fs_type);
1080 if (ret) 878 if (ret)
1081 goto out_icache; 879 goto out_icache;
1082 880
1083 pr_info("loaded (mon/mds/osd proto %d/%d/%d, osdmap %d/%d %d/%d)\n", 881 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1084 CEPH_MONC_PROTOCOL, CEPH_MDSC_PROTOCOL, CEPH_OSDC_PROTOCOL, 882
1085 CEPH_OSDMAP_VERSION, CEPH_OSDMAP_VERSION_EXT,
1086 CEPH_OSDMAP_INC_VERSION, CEPH_OSDMAP_INC_VERSION_EXT);
1087 return 0; 883 return 0;
1088 884
1089out_icache: 885out_icache:
1090 destroy_caches(); 886 destroy_caches();
1091out_msgr:
1092 ceph_msgr_exit();
1093out_debugfs:
1094 ceph_debugfs_cleanup();
1095out: 887out:
1096 return ret; 888 return ret;
1097} 889}
@@ -1101,8 +893,6 @@ static void __exit exit_ceph(void)
1101 dout("exit_ceph\n"); 893 dout("exit_ceph\n");
1102 unregister_filesystem(&ceph_fs_type); 894 unregister_filesystem(&ceph_fs_type);
1103 destroy_caches(); 895 destroy_caches();
1104 ceph_msgr_exit();
1105 ceph_debugfs_cleanup();
1106} 896}
1107 897
1108module_init(init_ceph); 898module_init(init_ceph);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b87638e84c4b..e2e904442ce2 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -1,7 +1,7 @@
1#ifndef _FS_CEPH_SUPER_H 1#ifndef _FS_CEPH_SUPER_H
2#define _FS_CEPH_SUPER_H 2#define _FS_CEPH_SUPER_H
3 3
4#include "ceph_debug.h" 4#include <linux/ceph/ceph_debug.h>
5 5
6#include <asm/unaligned.h> 6#include <asm/unaligned.h>
7#include <linux/backing-dev.h> 7#include <linux/backing-dev.h>
@@ -14,13 +14,7 @@
14#include <linux/writeback.h> 14#include <linux/writeback.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#include "types.h" 17#include <linux/ceph/libceph.h>
18#include "messenger.h"
19#include "msgpool.h"
20#include "mon_client.h"
21#include "mds_client.h"
22#include "osd_client.h"
23#include "ceph_fs.h"
24 18
25/* f_type in struct statfs */ 19/* f_type in struct statfs */
26#define CEPH_SUPER_MAGIC 0x00c36400 20#define CEPH_SUPER_MAGIC 0x00c36400
@@ -30,42 +24,25 @@
30#define CEPH_BLOCK_SHIFT 20 /* 1 MB */ 24#define CEPH_BLOCK_SHIFT 20 /* 1 MB */
31#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) 25#define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT)
32 26
33/* 27#define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */
34 * Supported features 28#define CEPH_MOUNT_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
35 */ 29#define CEPH_MOUNT_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
36#define CEPH_FEATURE_SUPPORTED CEPH_FEATURE_NOSRCADDR | CEPH_FEATURE_FLOCK
37#define CEPH_FEATURE_REQUIRED CEPH_FEATURE_NOSRCADDR
38 30
39/* 31#define CEPH_MOUNT_OPT_DEFAULT (CEPH_MOUNT_OPT_RBYTES)
40 * mount options
41 */
42#define CEPH_OPT_FSID (1<<0)
43#define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */
44#define CEPH_OPT_MYIP (1<<2) /* specified my ip */
45#define CEPH_OPT_DIRSTAT (1<<4) /* funky `cat dirname` for stats */
46#define CEPH_OPT_RBYTES (1<<5) /* dir st_bytes = rbytes */
47#define CEPH_OPT_NOCRC (1<<6) /* no data crc on writes */
48#define CEPH_OPT_NOASYNCREADDIR (1<<7) /* no dcache readdir */
49 32
50#define CEPH_OPT_DEFAULT (CEPH_OPT_RBYTES) 33#define ceph_set_mount_opt(fsc, opt) \
34 (fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt;
35#define ceph_test_mount_opt(fsc, opt) \
36 (!!((fsc)->mount_options->flags & CEPH_MOUNT_OPT_##opt))
51 37
52#define ceph_set_opt(client, opt) \ 38#define CEPH_MAX_READDIR_DEFAULT 1024
53 (client)->mount_args->flags |= CEPH_OPT_##opt; 39#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
54#define ceph_test_opt(client, opt) \ 40#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
55 (!!((client)->mount_args->flags & CEPH_OPT_##opt))
56 41
57 42struct ceph_mount_options {
58struct ceph_mount_args {
59 int sb_flags;
60 int flags; 43 int flags;
61 struct ceph_fsid fsid; 44 int sb_flags;
62 struct ceph_entity_addr my_addr; 45
63 int num_mon;
64 struct ceph_entity_addr *mon_addr;
65 int mount_timeout;
66 int osd_idle_ttl;
67 int osd_timeout;
68 int osd_keepalive_timeout;
69 int wsize; 46 int wsize;
70 int rsize; /* max readahead */ 47 int rsize; /* max readahead */
71 int congestion_kb; /* max writeback in flight */ 48 int congestion_kb; /* max writeback in flight */
@@ -73,82 +50,25 @@ struct ceph_mount_args {
73 int cap_release_safety; 50 int cap_release_safety;
74 int max_readdir; /* max readdir result (entries) */ 51 int max_readdir; /* max readdir result (entries) */
75 int max_readdir_bytes; /* max readdir result (bytes) */ 52 int max_readdir_bytes; /* max readdir result (bytes) */
76 char *snapdir_name; /* default ".snap" */
77 char *name;
78 char *secret;
79};
80
81/*
82 * defaults
83 */
84#define CEPH_MOUNT_TIMEOUT_DEFAULT 60
85#define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */
86#define CEPH_OSD_KEEPALIVE_DEFAULT 5
87#define CEPH_OSD_IDLE_TTL_DEFAULT 60
88#define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */
89#define CEPH_MAX_READDIR_DEFAULT 1024
90#define CEPH_MAX_READDIR_BYTES_DEFAULT (512*1024)
91
92#define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024)
93#define CEPH_MSG_MAX_DATA_LEN (16*1024*1024)
94
95#define CEPH_SNAPDIRNAME_DEFAULT ".snap"
96#define CEPH_AUTH_NAME_DEFAULT "guest"
97/*
98 * Delay telling the MDS we no longer want caps, in case we reopen
99 * the file. Delay a minimum amount of time, even if we send a cap
100 * message for some other reason. Otherwise, take the opportunity to
101 * update the mds to avoid sending another message later.
102 */
103#define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */
104#define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */
105
106#define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4)
107
108/* mount state */
109enum {
110 CEPH_MOUNT_MOUNTING,
111 CEPH_MOUNT_MOUNTED,
112 CEPH_MOUNT_UNMOUNTING,
113 CEPH_MOUNT_UNMOUNTED,
114 CEPH_MOUNT_SHUTDOWN,
115};
116 53
117/* 54 /*
118 * subtract jiffies 55 * everything above this point can be memcmp'd; everything below
119 */ 56 * is handled in compare_mount_options()
120static inline unsigned long time_sub(unsigned long a, unsigned long b) 57 */
121{
122 BUG_ON(time_after(b, a));
123 return (long)a - (long)b;
124}
125
126/*
127 * per-filesystem client state
128 *
129 * possibly shared by multiple mount points, if they are
130 * mounting the same ceph filesystem/cluster.
131 */
132struct ceph_client {
133 struct ceph_fsid fsid;
134 bool have_fsid;
135 58
136 struct mutex mount_mutex; /* serialize mount attempts */ 59 char *snapdir_name; /* default ".snap" */
137 struct ceph_mount_args *mount_args; 60};
138 61
62struct ceph_fs_client {
139 struct super_block *sb; 63 struct super_block *sb;
140 64
141 unsigned long mount_state; 65 struct ceph_mount_options *mount_options;
142 wait_queue_head_t auth_wq; 66 struct ceph_client *client;
143
144 int auth_err;
145 67
68 unsigned long mount_state;
146 int min_caps; /* min caps i added */ 69 int min_caps; /* min caps i added */
147 70
148 struct ceph_messenger *msgr; /* messenger instance */ 71 struct ceph_mds_client *mdsc;
149 struct ceph_mon_client monc;
150 struct ceph_mds_client mdsc;
151 struct ceph_osd_client osdc;
152 72
153 /* writeback */ 73 /* writeback */
154 mempool_t *wb_pagevec_pool; 74 mempool_t *wb_pagevec_pool;
@@ -160,14 +80,14 @@ struct ceph_client {
160 struct backing_dev_info backing_dev_info; 80 struct backing_dev_info backing_dev_info;
161 81
162#ifdef CONFIG_DEBUG_FS 82#ifdef CONFIG_DEBUG_FS
163 struct dentry *debugfs_monmap; 83 struct dentry *debugfs_dentry_lru, *debugfs_caps;
164 struct dentry *debugfs_mdsmap, *debugfs_osdmap;
165 struct dentry *debugfs_dir, *debugfs_dentry_lru, *debugfs_caps;
166 struct dentry *debugfs_congestion_kb; 84 struct dentry *debugfs_congestion_kb;
167 struct dentry *debugfs_bdi; 85 struct dentry *debugfs_bdi;
86 struct dentry *debugfs_mdsc, *debugfs_mdsmap;
168#endif 87#endif
169}; 88};
170 89
90
171/* 91/*
172 * File i/o capability. This tracks shared state with the metadata 92 * File i/o capability. This tracks shared state with the metadata
173 * server that allows us to cache or writeback attributes or to read 93 * server that allows us to cache or writeback attributes or to read
@@ -275,6 +195,20 @@ struct ceph_inode_xattr {
275 int should_free_val; 195 int should_free_val;
276}; 196};
277 197
198/*
199 * Ceph dentry state
200 */
201struct ceph_dentry_info {
202 struct ceph_mds_session *lease_session;
203 u32 lease_gen, lease_shared_gen;
204 u32 lease_seq;
205 unsigned long lease_renew_after, lease_renew_from;
206 struct list_head lru;
207 struct dentry *dentry;
208 u64 time;
209 u64 offset;
210};
211
278struct ceph_inode_xattrs_info { 212struct ceph_inode_xattrs_info {
279 /* 213 /*
280 * (still encoded) xattr blob. we avoid the overhead of parsing 214 * (still encoded) xattr blob. we avoid the overhead of parsing
@@ -296,11 +230,6 @@ struct ceph_inode_xattrs_info {
296/* 230/*
297 * Ceph inode. 231 * Ceph inode.
298 */ 232 */
299#define CEPH_I_COMPLETE 1 /* we have complete directory cached */
300#define CEPH_I_NODELAY 4 /* do not delay cap release */
301#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
302#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
303
304struct ceph_inode_info { 233struct ceph_inode_info {
305 struct ceph_vino i_vino; /* ceph ino + snap */ 234 struct ceph_vino i_vino; /* ceph ino + snap */
306 235
@@ -391,6 +320,63 @@ static inline struct ceph_inode_info *ceph_inode(struct inode *inode)
391 return container_of(inode, struct ceph_inode_info, vfs_inode); 320 return container_of(inode, struct ceph_inode_info, vfs_inode);
392} 321}
393 322
323static inline struct ceph_vino ceph_vino(struct inode *inode)
324{
325 return ceph_inode(inode)->i_vino;
326}
327
328/*
329 * ino_t is <64 bits on many architectures, blech.
330 *
331 * don't include snap in ino hash, at least for now.
332 */
333static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
334{
335 ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
336#if BITS_PER_LONG == 32
337 ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
338 if (!ino)
339 ino = 1;
340#endif
341 return ino;
342}
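
On 32-bit machines the fold above XORs the top half of the 64-bit ino into the bottom half, then remaps 0 to 1, since ino 0 is reserved by the VFS. A userspace check of the same fold:

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_ino(uint64_t ino64)
{
	uint32_t ino = (uint32_t)ino64;

	ino ^= ino64 >> (sizeof(uint64_t) - sizeof(uint32_t)) * 8;
	if (!ino)                 /* never return 0: "no inode" to the VFS */
		ino = 1;
	return ino;
}

int main(void)
{
	printf("%u\n", fold_ino(0x100000001ULL));  /* 1 ^ 1 = 0, remapped to 1 */
	printf("%u\n", fold_ino(0x123456789ULL));  /* 0x23456789 ^ 0x1 */
	return 0;
}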
343
344/* for printf-style formatting */
345#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
346
347static inline u64 ceph_ino(struct inode *inode)
348{
349 return ceph_inode(inode)->i_vino.ino;
350}
351static inline u64 ceph_snap(struct inode *inode)
352{
353 return ceph_inode(inode)->i_vino.snap;
354}
355
356static inline int ceph_ino_compare(struct inode *inode, void *data)
357{
358 struct ceph_vino *pvino = (struct ceph_vino *)data;
359 struct ceph_inode_info *ci = ceph_inode(inode);
360 return ci->i_vino.ino == pvino->ino &&
361 ci->i_vino.snap == pvino->snap;
362}
363
364static inline struct inode *ceph_find_inode(struct super_block *sb,
365 struct ceph_vino vino)
366{
367 ino_t t = ceph_vino_to_ino(vino);
368 return ilookup5(sb, t, ceph_ino_compare, &vino);
369}
370
371
372/*
373 * Ceph inode.
374 */
375#define CEPH_I_COMPLETE 1 /* we have complete directory cached */
376#define CEPH_I_NODELAY 4 /* do not delay cap release */
377#define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */
378#define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */
379
394static inline void ceph_i_clear(struct inode *inode, unsigned mask) 380static inline void ceph_i_clear(struct inode *inode, unsigned mask)
395{ 381{
396 struct ceph_inode_info *ci = ceph_inode(inode); 382 struct ceph_inode_info *ci = ceph_inode(inode);
@@ -432,20 +418,6 @@ extern u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
432 struct ceph_inode_frag *pfrag, 418 struct ceph_inode_frag *pfrag,
433 int *found); 419 int *found);
434 420
435/*
436 * Ceph dentry state
437 */
438struct ceph_dentry_info {
439 struct ceph_mds_session *lease_session;
440 u32 lease_gen, lease_shared_gen;
441 u32 lease_seq;
442 unsigned long lease_renew_after, lease_renew_from;
443 struct list_head lru;
444 struct dentry *dentry;
445 u64 time;
446 u64 offset;
447};
448
449static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry) 421static inline struct ceph_dentry_info *ceph_dentry(struct dentry *dentry)
450{ 422{
451 return (struct ceph_dentry_info *)dentry->d_fsdata; 423 return (struct ceph_dentry_info *)dentry->d_fsdata;
@@ -456,22 +428,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off)
456 return ((loff_t)frag << 32) | (loff_t)off; 428 return ((loff_t)frag << 32) | (loff_t)off;
457} 429}
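
ceph_make_fpos() packs the directory fragment into the high 32 bits of the readdir file position and the offset within that fragment into the low 32. A round-trip check, using uint64_t where the kernel uses loff_t:

#include <stdint.h>
#include <stdio.h>

static uint64_t make_fpos(uint32_t frag, uint32_t off)
{
	return ((uint64_t)frag << 32) | (uint64_t)off;
}

int main(void)
{
	uint64_t pos = make_fpos(0xdeadbeef, 42);

	printf("frag=%x off=%u\n",                 /* frag=deadbeef off=42 */
	       (unsigned)(pos >> 32), (unsigned)(pos & 0xffffffffu));
	return 0;
}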
458 430
459/*
460 * ino_t is <64 bits on many architectures, blech.
461 *
462 * don't include snap in ino hash, at least for now.
463 */
464static inline ino_t ceph_vino_to_ino(struct ceph_vino vino)
465{
466 ino_t ino = (ino_t)vino.ino; /* ^ (vino.snap << 20); */
467#if BITS_PER_LONG == 32
468 ino ^= vino.ino >> (sizeof(u64)-sizeof(ino_t)) * 8;
469 if (!ino)
470 ino = 1;
471#endif
472 return ino;
473}
474
475static inline int ceph_set_ino_cb(struct inode *inode, void *data) 431static inline int ceph_set_ino_cb(struct inode *inode, void *data)
476{ 432{
477 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; 433 ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
@@ -479,39 +435,6 @@ static inline int ceph_set_ino_cb(struct inode *inode, void *data)
479 return 0; 435 return 0;
480} 436}
481 437
482static inline struct ceph_vino ceph_vino(struct inode *inode)
483{
484 return ceph_inode(inode)->i_vino;
485}
486
487/* for printf-style formatting */
488#define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap
489
490static inline u64 ceph_ino(struct inode *inode)
491{
492 return ceph_inode(inode)->i_vino.ino;
493}
494static inline u64 ceph_snap(struct inode *inode)
495{
496 return ceph_inode(inode)->i_vino.snap;
497}
498
499static inline int ceph_ino_compare(struct inode *inode, void *data)
500{
501 struct ceph_vino *pvino = (struct ceph_vino *)data;
502 struct ceph_inode_info *ci = ceph_inode(inode);
503 return ci->i_vino.ino == pvino->ino &&
504 ci->i_vino.snap == pvino->snap;
505}
506
507static inline struct inode *ceph_find_inode(struct super_block *sb,
508 struct ceph_vino vino)
509{
510 ino_t t = ceph_vino_to_ino(vino);
511 return ilookup5(sb, t, ceph_ino_compare, &vino);
512}
513
514
515/* 438/*
516 * caps helpers 439 * caps helpers
517 */ 440 */
@@ -576,18 +499,18 @@ extern int ceph_reserve_caps(struct ceph_mds_client *mdsc,
576 struct ceph_cap_reservation *ctx, int need); 499 struct ceph_cap_reservation *ctx, int need);
577extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc, 500extern int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
578 struct ceph_cap_reservation *ctx); 501 struct ceph_cap_reservation *ctx);
579extern void ceph_reservation_status(struct ceph_client *client, 502extern void ceph_reservation_status(struct ceph_fs_client *client,
580 int *total, int *avail, int *used, 503 int *total, int *avail, int *used,
581 int *reserved, int *min); 504 int *reserved, int *min);
582 505
583static inline struct ceph_client *ceph_inode_to_client(struct inode *inode) 506static inline struct ceph_fs_client *ceph_inode_to_client(struct inode *inode)
584{ 507{
585 return (struct ceph_client *)inode->i_sb->s_fs_info; 508 return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
586} 509}
587 510
588static inline struct ceph_client *ceph_sb_to_client(struct super_block *sb) 511static inline struct ceph_fs_client *ceph_sb_to_client(struct super_block *sb)
589{ 512{
590 return (struct ceph_client *)sb->s_fs_info; 513 return (struct ceph_fs_client *)sb->s_fs_info;
591} 514}
592 515
593 516
@@ -617,51 +540,6 @@ struct ceph_file_info {
617 540
618 541
619/* 542/*
620 * snapshots
621 */
622
623/*
624 * A "snap context" is the set of existing snapshots when we
625 * write data. It is used by the OSD to guide its COW behavior.
626 *
627 * The ceph_snap_context is refcounted, and attached to each dirty
628 * page, indicating which context the dirty data belonged when it was
629 * dirtied.
630 */
631struct ceph_snap_context {
632 atomic_t nref;
633 u64 seq;
634 int num_snaps;
635 u64 snaps[];
636};
637
638static inline struct ceph_snap_context *
639ceph_get_snap_context(struct ceph_snap_context *sc)
640{
641 /*
642 printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
643 atomic_read(&sc->nref)+1);
644 */
645 if (sc)
646 atomic_inc(&sc->nref);
647 return sc;
648}
649
650static inline void ceph_put_snap_context(struct ceph_snap_context *sc)
651{
652 if (!sc)
653 return;
654 /*
655 printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref),
656 atomic_read(&sc->nref)-1);
657 */
658 if (atomic_dec_and_test(&sc->nref)) {
659 /*printk(" deleting snap_context %p\n", sc);*/
660 kfree(sc);
661 }
662}
663
664/*
665 * A "snap realm" describes a subset of the file hierarchy sharing 543 * A "snap realm" describes a subset of the file hierarchy sharing
666 * the same set of snapshots that apply to it. The realms themselves 544 * the same set of snapshots that apply to it. The realms themselves
667 * are organized into a hierarchy, such that children inherit (some of) 545 * are organized into a hierarchy, such that children inherit (some of)
@@ -699,16 +577,33 @@ struct ceph_snap_realm {
699 spinlock_t inodes_with_caps_lock; 577 spinlock_t inodes_with_caps_lock;
700}; 578};
701 579
702 580static inline int default_congestion_kb(void)
703
704/*
705 * calculate the number of pages a given length and offset map onto,
706 * if we align the data.
707 */
708static inline int calc_pages_for(u64 off, u64 len)
709{ 581{
710 return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - 582 int congestion_kb;
711 (off >> PAGE_CACHE_SHIFT); 583
584 /*
585 * Copied from NFS
586 *
587 * congestion size, scale with available memory.
588 *
589 * 64MB: 8192k
590 * 128MB: 11585k
591 * 256MB: 16384k
592 * 512MB: 23170k
593 * 1GB: 32768k
594 * 2GB: 46340k
595 * 4GB: 65536k
596 * 8GB: 92681k
597 * 16GB: 131072k
598 *
599 * This allows larger machines to have larger/more transfers.
600 * Limit the default to 256M
601 */
602 congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
603 if (congestion_kb > 256*1024)
604 congestion_kb = 256*1024;
605
606 return congestion_kb;
712} 607}
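
A userspace replay of that calculation, matching the table in the comment; it assumes 4k pages and substitutes libm's sqrt() for the kernel's int_sqrt() (link with -lm):

#include <math.h>
#include <stdio.h>

static long congestion_kb(unsigned long totalram_pages)
{
	const int page_shift = 12;               /* assume 4k pages */
	long kb = (16 * (long)sqrt((double)totalram_pages))
			<< (page_shift - 10);

	if (kb > 256 * 1024)                     /* cap the default at 256M */
		kb = 256 * 1024;
	return kb;
}

int main(void)
{
	unsigned long pages_per_gb = (1UL << 30) >> 12;

	printf("1GB  -> %ldk\n", congestion_kb(pages_per_gb));       /* 32768k */
	printf("16GB -> %ldk\n", congestion_kb(16 * pages_per_gb));  /* 131072k */
	return 0;
}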
713 608
714 609
@@ -741,16 +636,6 @@ static inline bool __ceph_have_pending_cap_snap(struct ceph_inode_info *ci)
741 ci_item)->writing; 636 ci_item)->writing;
742} 637}
743 638
744
745/* super.c */
746extern struct kmem_cache *ceph_inode_cachep;
747extern struct kmem_cache *ceph_cap_cachep;
748extern struct kmem_cache *ceph_dentry_cachep;
749extern struct kmem_cache *ceph_file_cachep;
750
751extern const char *ceph_msg_type_name(int type);
752extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
753
754/* inode.c */ 639/* inode.c */
755extern const struct inode_operations ceph_file_iops; 640extern const struct inode_operations ceph_file_iops;
756 641
@@ -857,12 +742,18 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
857/* file.c */ 742/* file.c */
858extern const struct file_operations ceph_file_fops; 743extern const struct file_operations ceph_file_fops;
859extern const struct address_space_operations ceph_aops; 744extern const struct address_space_operations ceph_aops;
745extern int ceph_copy_to_page_vector(struct page **pages,
746 const char *data,
747 loff_t off, size_t len);
748extern int ceph_copy_from_page_vector(struct page **pages,
749 char *data,
750 loff_t off, size_t len);
751extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
860extern int ceph_open(struct inode *inode, struct file *file); 752extern int ceph_open(struct inode *inode, struct file *file);
861extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry, 753extern struct dentry *ceph_lookup_open(struct inode *dir, struct dentry *dentry,
862 struct nameidata *nd, int mode, 754 struct nameidata *nd, int mode,
863 int locked_dir); 755 int locked_dir);
864extern int ceph_release(struct inode *inode, struct file *filp); 756extern int ceph_release(struct inode *inode, struct file *filp);
865extern void ceph_release_page_vector(struct page **pages, int num_pages);
866 757
867/* dir.c */ 758/* dir.c */
868extern const struct file_operations ceph_dir_fops; 759extern const struct file_operations ceph_dir_fops;
@@ -892,12 +783,6 @@ extern long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
892/* export.c */ 783/* export.c */
893extern const struct export_operations ceph_export_ops; 784extern const struct export_operations ceph_export_ops;
894 785
895/* debugfs.c */
896extern int ceph_debugfs_init(void);
897extern void ceph_debugfs_cleanup(void);
898extern int ceph_debugfs_client_init(struct ceph_client *client);
899extern void ceph_debugfs_client_cleanup(struct ceph_client *client);
900
901/* locks.c */ 786/* locks.c */
902extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl); 787extern int ceph_lock(struct file *file, int cmd, struct file_lock *fl);
903extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl); 788extern int ceph_flock(struct file *file, int cmd, struct file_lock *fl);
@@ -914,4 +799,8 @@ static inline struct inode *get_dentry_parent_inode(struct dentry *dentry)
914 return NULL; 799 return NULL;
915} 800}
916 801
802/* debugfs.c */
803extern int ceph_fs_debugfs_init(struct ceph_fs_client *client);
804extern void ceph_fs_debugfs_cleanup(struct ceph_fs_client *client);
805
917#endif /* _FS_CEPH_SUPER_H */ 806#endif /* _FS_CEPH_SUPER_H */
diff --git a/fs/ceph/types.h b/fs/ceph/types.h
deleted file mode 100644
index 28b35a005ec2..000000000000
--- a/fs/ceph/types.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef _FS_CEPH_TYPES_H
2#define _FS_CEPH_TYPES_H
3
4/* needed before including ceph_fs.h */
5#include <linux/in.h>
6#include <linux/types.h>
7#include <linux/fcntl.h>
8#include <linux/string.h>
9
10#include "ceph_fs.h"
11#include "ceph_frag.h"
12#include "ceph_hash.h"
13
14/*
15 * Identify inodes by both their ino AND snapshot id (a u64).
16 */
17struct ceph_vino {
18 u64 ino;
19 u64 snap;
20};
21
22
23/* context for the caps reservation mechanism */
24struct ceph_cap_reservation {
25 int count;
26};
27
28
29#endif
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 9578af610b73..70e919960acb 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1,6 +1,9 @@
1#include "ceph_debug.h" 1#include <linux/ceph/ceph_debug.h>
2
2#include "super.h" 3#include "super.h"
3#include "decode.h" 4#include "mds_client.h"
5
6#include <linux/ceph/decode.h>
4 7
5#include <linux/xattr.h> 8#include <linux/xattr.h>
6#include <linux/slab.h> 9#include <linux/slab.h>
@@ -620,12 +623,12 @@ out:
620static int ceph_sync_setxattr(struct dentry *dentry, const char *name, 623static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
621 const char *value, size_t size, int flags) 624 const char *value, size_t size, int flags)
622{ 625{
623 struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); 626 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
624 struct inode *inode = dentry->d_inode; 627 struct inode *inode = dentry->d_inode;
625 struct ceph_inode_info *ci = ceph_inode(inode); 628 struct ceph_inode_info *ci = ceph_inode(inode);
626 struct inode *parent_inode = dentry->d_parent->d_inode; 629 struct inode *parent_inode = dentry->d_parent->d_inode;
627 struct ceph_mds_request *req; 630 struct ceph_mds_request *req;
628 struct ceph_mds_client *mdsc = &client->mdsc; 631 struct ceph_mds_client *mdsc = fsc->mdsc;
629 int err; 632 int err;
630 int i, nr_pages; 633 int i, nr_pages;
631 struct page **pages = NULL; 634 struct page **pages = NULL;
@@ -777,8 +780,8 @@ out:
777 780
778static int ceph_send_removexattr(struct dentry *dentry, const char *name) 781static int ceph_send_removexattr(struct dentry *dentry, const char *name)
779{ 782{
780 struct ceph_client *client = ceph_sb_to_client(dentry->d_sb); 783 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
781 struct ceph_mds_client *mdsc = &client->mdsc; 784 struct ceph_mds_client *mdsc = fsc->mdsc;
782 struct inode *inode = dentry->d_inode; 785 struct inode *inode = dentry->d_inode;
783 struct inode *parent_inode = dentry->d_parent->d_inode; 786 struct inode *parent_inode = dentry->d_parent->d_inode;
784 struct ceph_mds_request *req; 787 struct ceph_mds_request *req;