aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ceph/addr.c
diff options
context:
space:
mode:
authorAlex Elder <elder@inktank.com>2013-04-03 22:32:51 -0400
committerSage Weil <sage@inktank.com>2013-05-02 00:18:12 -0400
commit79528734f3ae4699a2886f62f55e18fb34fb3651 (patch)
tree51905378486b592fc2d4037d67ef3b577fe4eaa7 /fs/ceph/addr.c
parent430c28c3cb7f3dbd87de266ed52d65928957ff78 (diff)
libceph: keep source rather than message osd op array
An osd request keeps a pointer to the osd operations (ops) array that it builds in its request message. In order to allow each op in the array to have its own distinct data, we will need to keep track of each op's data, and that information does not go over the wire. As long as we're tracking the data we might as well just track the entire (source) op definition for each of the ops. And if we're doing that, we'll have no more need to keep a pointer to the wire-encoded version. This patch makes the array of source ops be kept with the osd request structure, and uses that instead of the version encoded in the message in places where that was previously used. The array will be embedded in the request structure, and the maximum number of ops we ever actually use is currently 2. So reduce CEPH_OSD_MAX_OP to 2 to reduce the size of the structure. The result of doing this sort of ripples back up, and as a result various function parameters and local variables become unnecessary. Make r_num_ops be unsigned, and move the definition of struct ceph_osd_req_op earlier to ensure it's defined where needed. It does not yet add per-op data, that's coming soon. This resolves: http://tracker.ceph.com/issues/4656 Signed-off-by: Alex Elder <elder@inktank.com> Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
Diffstat (limited to 'fs/ceph/addr.c')
-rw-r--r-- fs/ceph/addr.c | 21
1 file changed, 9 insertions(+), 12 deletions(-)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 127be29a6c22..c9da074f0fe6 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -288,7 +288,6 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
288 struct page *page = list_entry(page_list->prev, struct page, lru); 288 struct page *page = list_entry(page_list->prev, struct page, lru);
289 struct ceph_vino vino; 289 struct ceph_vino vino;
290 struct ceph_osd_request *req; 290 struct ceph_osd_request *req;
291 struct ceph_osd_req_op op;
292 u64 off; 291 u64 off;
293 u64 len; 292 u64 len;
294 int i; 293 int i;
@@ -314,7 +313,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
314 off, len); 313 off, len);
315 vino = ceph_vino(inode); 314 vino = ceph_vino(inode);
316 req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, 315 req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
317 1, &op, CEPH_OSD_OP_READ, 316 1, CEPH_OSD_OP_READ,
318 CEPH_OSD_FLAG_READ, NULL, 317 CEPH_OSD_FLAG_READ, NULL,
319 ci->i_truncate_seq, ci->i_truncate_size, 318 ci->i_truncate_seq, ci->i_truncate_size,
320 false); 319 false);
@@ -349,7 +348,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
349 req->r_callback = finish_read; 348 req->r_callback = finish_read;
350 req->r_inode = inode; 349 req->r_inode = inode;
351 350
352 ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); 351 ceph_osdc_build_request(req, off, NULL, vino.snap, NULL);
353 352
354 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); 353 dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len);
355 ret = ceph_osdc_start_request(osdc, req, false); 354 ret = ceph_osdc_start_request(osdc, req, false);
@@ -567,7 +566,7 @@ static void writepages_finish(struct ceph_osd_request *req,
567 struct ceph_snap_context *snapc = req->r_snapc; 566 struct ceph_snap_context *snapc = req->r_snapc;
568 struct address_space *mapping = inode->i_mapping; 567 struct address_space *mapping = inode->i_mapping;
569 int rc = req->r_result; 568 int rc = req->r_result;
570 u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length); 569 u64 bytes = req->r_ops[0].extent.length;
571 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 570 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
572 long writeback_stat; 571 long writeback_stat;
573 unsigned issued = ceph_caps_issued(ci); 572 unsigned issued = ceph_caps_issued(ci);
@@ -635,8 +634,7 @@ static void writepages_finish(struct ceph_osd_request *req,
635 634
636static struct ceph_osd_request * 635static struct ceph_osd_request *
637ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len, 636ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len,
638 struct ceph_snap_context *snapc, 637 struct ceph_snap_context *snapc, int num_ops)
639 int num_ops, struct ceph_osd_req_op *ops)
640{ 638{
641 struct ceph_fs_client *fsc; 639 struct ceph_fs_client *fsc;
642 struct ceph_inode_info *ci; 640 struct ceph_inode_info *ci;
@@ -648,7 +646,7 @@ ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len,
648 /* BUG_ON(vino.snap != CEPH_NOSNAP); */ 646 /* BUG_ON(vino.snap != CEPH_NOSNAP); */
649 647
650 return ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, 648 return ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
651 vino, offset, len, num_ops, ops, CEPH_OSD_OP_WRITE, 649 vino, offset, len, num_ops, CEPH_OSD_OP_WRITE,
652 CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK, 650 CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK,
653 snapc, ci->i_truncate_seq, ci->i_truncate_size, true); 651 snapc, ci->i_truncate_seq, ci->i_truncate_size, true);
654} 652}
@@ -738,7 +736,6 @@ retry:
738 last_snapc = snapc; 736 last_snapc = snapc;
739 737
740 while (!done && index <= end) { 738 while (!done && index <= end) {
741 struct ceph_osd_req_op ops[2];
742 int num_ops = do_sync ? 2 : 1; 739 int num_ops = do_sync ? 2 : 1;
743 struct ceph_vino vino; 740 struct ceph_vino vino;
744 unsigned i; 741 unsigned i;
@@ -846,7 +843,7 @@ get_more_pages:
846 len = wsize; 843 len = wsize;
847 req = ceph_writepages_osd_request(inode, 844 req = ceph_writepages_osd_request(inode,
848 offset, &len, snapc, 845 offset, &len, snapc,
849 num_ops, ops); 846 num_ops);
850 847
851 if (IS_ERR(req)) { 848 if (IS_ERR(req)) {
852 rc = PTR_ERR(req); 849 rc = PTR_ERR(req);
@@ -927,11 +924,11 @@ get_more_pages:
927 924
928 /* Update the write op length in case we changed it */ 925 /* Update the write op length in case we changed it */
929 926
930 osd_req_op_extent_update(&ops[0], len); 927 osd_req_op_extent_update(&req->r_ops[0], len);
931 928
932 vino = ceph_vino(inode); 929 vino = ceph_vino(inode);
933 ceph_osdc_build_request(req, offset, num_ops, ops, 930 ceph_osdc_build_request(req, offset, snapc, vino.snap,
934 snapc, vino.snap, &inode->i_mtime); 931 &inode->i_mtime);
935 932
936 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); 933 rc = ceph_osdc_start_request(&fsc->client->osdc, req, true);
937 BUG_ON(rc); 934 BUG_ON(rc);