author      Alex Elder <elder@inktank.com>      2013-03-14 15:09:05 -0400
committer   Sage Weil <sage@inktank.com>        2013-05-02 00:17:58 -0400
commit      acead002b200569273bed331c93c4a91d25e10b8
tree        779bf689da149acf73bd75f51641f3700469f6b7 /fs/ceph/addr.c
parent      a19308048182d5f9e16b03b1d1c038d9346c7589
libceph: don't build request in ceph_osdc_new_request()
This patch moves the call to ceph_osdc_build_request() out of
ceph_osdc_new_request() and into its caller.
This is in order to defer formatting osd operation information into
the request message until just before the request is started.
The only unusual (ab)user of ceph_osdc_build_request() is
ceph_writepages_start(), where the final length of the write request may
change (downward) based on the current inode size or the oldest
snapshot context with dirty data for the inode.
The remaining callers don't change anything in the request after it
has been built.
This means the ops array is now supplied by the caller. It also
means there is no need to pass the mtime to ceph_osdc_new_request()
(it gets provided to ceph_osdc_build_request()). And rather than
passing a do_sync flag, the number of ops supplied in the ops array
now determines whether a second STARTSYNC operation is added after
the requested READ or WRITE.
This and some of the patches that follow are related to having the
messenger (only) be responsible for filling the content of the
message header, as described here:
http://tracker.ceph.com/issues/4589
Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
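
To make the new calling convention concrete, here is a condensed sketch of the post-patch read path, drawn from the start_read() hunks in the diff below. It is not a standalone function: osdc, ci, inode, req, off and len are locals already present in start_read().

        struct ceph_osd_req_op op;      /* ops array is now supplied by the caller */
        struct ceph_vino vino = ceph_vino(inode);

        /* no mtime and no do_sync flag are passed to ceph_osdc_new_request() */
        req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
                                    1, &op, CEPH_OSD_OP_READ,
                                    CEPH_OSD_FLAG_READ, NULL,
                                    ci->i_truncate_seq, ci->i_truncate_size,
                                    false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* the op is formatted into the request message only here, just before use */
        ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL);

The read path passes NULL for the snapshot context and mtime arguments of ceph_osdc_build_request(); only the write path needs them.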
Diffstat (limited to 'fs/ceph/addr.c')
-rw-r--r--   fs/ceph/addr.c   36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index ae438d02a422..681463d5459b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -284,7 +284,9 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
                 &ceph_inode_to_client(inode)->client->osdc;
         struct ceph_inode_info *ci = ceph_inode(inode);
         struct page *page = list_entry(page_list->prev, struct page, lru);
+        struct ceph_vino vino;
         struct ceph_osd_request *req;
+        struct ceph_osd_req_op op;
         u64 off;
         u64 len;
         int i;
@@ -308,16 +310,17 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
         len = nr_pages << PAGE_CACHE_SHIFT;
         dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages,
              off, len);
-
-        req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode),
-                                    off, &len,
-                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
-                                    NULL, 0,
+        vino = ceph_vino(inode);
+        req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len,
+                                    1, &op, CEPH_OSD_OP_READ,
+                                    CEPH_OSD_FLAG_READ, NULL,
                                     ci->i_truncate_seq, ci->i_truncate_size,
-                                    NULL, false);
+                                    false);
         if (IS_ERR(req))
                 return PTR_ERR(req);
 
+        ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL);
+
         /* build page vector */
         nr_pages = calc_pages_for(0, len);
         pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS);
@@ -736,6 +739,7 @@ retry:
         last_snapc = snapc;
 
         while (!done && index <= end) {
+                struct ceph_osd_req_op ops[2];
                 unsigned i;
                 int first;
                 pgoff_t next;
@@ -825,20 +829,22 @@ get_more_pages:
 
                 /* ok */
                 if (locked_pages == 0) {
+                        struct ceph_vino vino;
+                        int num_ops = do_sync ? 2 : 1;
+
                         /* prepare async write request */
                         offset = (u64) page_offset(page);
                         len = wsize;
+                        vino = ceph_vino(inode);
+                        /* BUG_ON(vino.snap != CEPH_NOSNAP); */
                         req = ceph_osdc_new_request(&fsc->client->osdc,
-                                        &ci->i_layout,
-                                        ceph_vino(inode),
-                                        offset, &len,
+                                        &ci->i_layout, vino, offset, &len,
+                                        num_ops, ops,
                                         CEPH_OSD_OP_WRITE,
                                         CEPH_OSD_FLAG_WRITE |
                                                 CEPH_OSD_FLAG_ONDISK,
-                                        snapc, do_sync,
-                                        ci->i_truncate_seq,
-                                        ci->i_truncate_size,
-                                        &inode->i_mtime, true);
+                                        snapc, ci->i_truncate_seq,
+                                        ci->i_truncate_size, true);
 
                         if (IS_ERR(req)) {
                                 rc = PTR_ERR(req);
@@ -846,6 +852,10 @@ get_more_pages:
                                 break;
                         }
 
+                        ceph_osdc_build_request(req, offset,
+                                        num_ops, ops, snapc, vino.snap,
+                                        &inode->i_mtime);
+
                         req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES;
                         req->r_data_out.length = len;
                         req->r_data_out.alignment = 0;
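
For the write path, ceph_writepages_start() follows the same split. The sketch below condenses the hunks above; fsc, ci, inode, page, wsize, snapc, do_sync, offset, len, req and rc are locals of the surrounding function, and the error handling between PTR_ERR() and the break is abbreviated.

        struct ceph_osd_req_op ops[2];
        int num_ops = do_sync ? 2 : 1;  /* two ops implies a trailing STARTSYNC */
        struct ceph_vino vino = ceph_vino(inode);

        offset = (u64) page_offset(page);
        len = wsize;
        req = ceph_osdc_new_request(&fsc->client->osdc,
                                    &ci->i_layout, vino, offset, &len,
                                    num_ops, ops, CEPH_OSD_OP_WRITE,
                                    CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
                                    snapc, ci->i_truncate_seq,
                                    ci->i_truncate_size, true);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                break;          /* remaining error handling omitted */
        }

        /* the mtime now goes to ceph_osdc_build_request(), not ceph_osdc_new_request() */
        ceph_osdc_build_request(req, offset, num_ops, ops, snapc,
                                vino.snap, &inode->i_mtime);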