author		Alex Elder <elder@inktank.com>	2013-04-05 02:27:12 -0400
committer	Sage Weil <sage@inktank.com>	2013-05-02 00:18:23 -0400
commit		a4ce40a9a7c1053ac2a41cf64255e44e356e5522 (patch)
tree		de1aeb42625f19f6cfe8806db333ec853de0f5b7 /net
parent		39b44cbe86db42e70693787b2ede81c309925d0b (diff)
libceph: combine initializing and setting osd data
This ends up being a rather large patch but what it's doing is
somewhat straightforward.  Basically, this is replacing two calls
with one.  The first of the two calls is initializing a struct
ceph_osd_data with data (either a page array, a page list, or a bio
list); the second is setting an osd request op so it associates that
data with one of the op's parameters.  In place of those two will be
a single function that initializes the op directly.

That means we sort of fan out a set of the needed functions:
    - extent ops with pages data
    - extent ops with pagelist data
    - extent ops with bio list data, and
    - class ops with page data for receiving a response

We also have to define another one, but it's only used internally:
    - class ops with pagelist data for request parameters

Note that we *still* haven't gotten rid of the osd request's
r_data_in and r_data_out fields.  All the osd ops refer to them for
their data.  For now, these data fields are pointers assigned to the
appropriate r_data_* field when these new functions are called.

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
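As a rough illustration (not part of the commit itself), the caller-side change amounts to collapsing the old two-step pattern into a single call. The sketch below is modeled on the writepages conversion in the diff; in the old code the two calls were actually split between ceph_osdc_new_request() and its callers, and are shown back to back here only for contrast, with the surrounding request setup elided:

	/* Old pattern: initialize the data item, then attach it to op 0 by hand. */
	ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align,
				false, false);
	osd_req_op_extent_osd_data(req, 0, &req->r_data_out);

	/* New pattern: one call sets up op 0's outgoing page data directly. */
	osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align,
				false, false);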
Diffstat (limited to 'net')
-rw-r--r--	net/ceph/osd_client.c	155
1 file changed, 122 insertions(+), 33 deletions(-)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 932b8af8b8ee..86cb52404f17 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1,3 +1,4 @@
+
 #include <linux/ceph/ceph_debug.h>
 
 #include <linux/module.h>
@@ -85,7 +86,7 @@ static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
 	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
 }
 
-void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
+static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
 			struct page **pages, u64 length, u32 alignment,
 			bool pages_from_pool, bool own_pages)
 {
@@ -96,27 +97,131 @@ void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
 	osd_data->pages_from_pool = pages_from_pool;
 	osd_data->own_pages = own_pages;
 }
-EXPORT_SYMBOL(ceph_osd_data_pages_init);
 
-void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
+static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
 			struct ceph_pagelist *pagelist)
 {
 	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
 	osd_data->pagelist = pagelist;
 }
-EXPORT_SYMBOL(ceph_osd_data_pagelist_init);
 
 #ifdef CONFIG_BLOCK
-void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
+static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
 			struct bio *bio, size_t bio_length)
 {
 	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
 	osd_data->bio = bio;
 	osd_data->bio_length = bio_length;
 }
-EXPORT_SYMBOL(ceph_osd_data_bio_init);
 #endif /* CONFIG_BLOCK */
 
+struct ceph_osd_data *
+osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
+			unsigned int which, bool write_request)
+{
+	BUG_ON(which >= osd_req->r_num_ops);
+
+	/* return &osd_req->r_ops[which].extent.osd_data; */
+	return write_request ? &osd_req->r_data_out : &osd_req->r_data_in;
+}
+EXPORT_SYMBOL(osd_req_op_extent_osd_data);
+
+struct ceph_osd_data *
+osd_req_op_cls_request_info(struct ceph_osd_request *osd_req,
+			unsigned int which)
+{
+	BUG_ON(which >= osd_req->r_num_ops);
+
+	/* return &osd_req->r_ops[which].cls.request_info; */
+	return &osd_req->r_data_out;	/* Request data is outgoing */
+}
+EXPORT_SYMBOL(osd_req_op_cls_request_info);	/* ??? */
+
+struct ceph_osd_data *
+osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
+			unsigned int which)
+{
+	BUG_ON(which >= osd_req->r_num_ops);
+
+	/* return &osd_req->r_ops[which].cls.response_data; */
+	return &osd_req->r_data_in;	/* Response data is incoming */
+}
+EXPORT_SYMBOL(osd_req_op_cls_response_data);	/* ??? */
+
+void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
+			unsigned int which, bool write_request,
+			struct page **pages, u64 length, u32 alignment,
+			bool pages_from_pool, bool own_pages)
+{
+	struct ceph_osd_data *osd_data;
+
+	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
+	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
+				pages_from_pool, own_pages);
+
+	osd_req->r_ops[which].extent.osd_data =
+		osd_req_op_extent_osd_data(osd_req, which, write_request);
+}
+EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
+
+void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
+			unsigned int which, bool write_request,
+			struct ceph_pagelist *pagelist)
+{
+	struct ceph_osd_data *osd_data;
+
+	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
+	ceph_osd_data_pagelist_init(osd_data, pagelist);
+
+	osd_req->r_ops[which].extent.osd_data =
+		osd_req_op_extent_osd_data(osd_req, which, write_request);
+}
+EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
+
+#ifdef CONFIG_BLOCK
+void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
+			unsigned int which, bool write_request,
+			struct bio *bio, size_t bio_length)
+{
+	struct ceph_osd_data *osd_data;
+
+	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
+	ceph_osd_data_bio_init(osd_data, bio, bio_length);
+
+	osd_req->r_ops[which].extent.osd_data =
+		osd_req_op_extent_osd_data(osd_req, which, write_request);
+}
+EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
+#endif /* CONFIG_BLOCK */
+
+static void osd_req_op_cls_request_info_pagelist(
+			struct ceph_osd_request *osd_req,
+			unsigned int which, struct ceph_pagelist *pagelist)
+{
+	struct ceph_osd_data *osd_data;
+
+	osd_data = osd_req_op_cls_request_info(osd_req, which);
+	ceph_osd_data_pagelist_init(osd_data, pagelist);
+
+	osd_req->r_ops[which].cls.request_info =
+		osd_req_op_cls_request_info(osd_req, which);
+}
+
+void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
+			unsigned int which, struct page **pages, u64 length,
+			u32 alignment, bool pages_from_pool, bool own_pages)
+{
+	struct ceph_osd_data *osd_data;
+
+	osd_data = osd_req_op_cls_response_data(osd_req, which);
+	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
+				pages_from_pool, own_pages);
+
+	osd_req->r_ops[which].cls.response_data =
+		osd_req_op_cls_response_data(osd_req, which);
+}
+EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
+
 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
 {
 	switch (osd_data->type) {
@@ -385,15 +490,6 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
 }
 EXPORT_SYMBOL(osd_req_op_extent_update);
 
-void osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
-			unsigned int which,
-			struct ceph_osd_data *osd_data)
-{
-	BUG_ON(which >= osd_req->r_num_ops);
-	osd_req->r_ops[which].extent.osd_data = osd_data;
-}
-EXPORT_SYMBOL(osd_req_op_extent_osd_data);
-
 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
 			u16 opcode, const char *class, const char *method,
 			const void *request_data, size_t request_data_size)
@@ -429,22 +525,13 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
 	ceph_pagelist_append(pagelist, request_data, request_data_size);
 	payload_len += request_data_size;
 
-	op->cls.request_info = &osd_req->r_data_out;
-	ceph_osd_data_pagelist_init(op->cls.request_info, pagelist);
+	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
 
 	op->cls.argc = 0;	/* currently unused */
 
 	op->payload_len = payload_len;
 }
 EXPORT_SYMBOL(osd_req_op_cls_init);
-void osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
-			unsigned int which,
-			struct ceph_osd_data *response_data)
-{
-	BUG_ON(which >= osd_req->r_num_ops);
-	osd_req->r_ops[which].cls.response_data = response_data;
-}
-EXPORT_SYMBOL(osd_req_op_cls_response_data);
 
 void osd_req_op_watch_init(struct ceph_osd_request *osd_req,
 			unsigned int which, u16 opcode,
@@ -547,7 +634,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 				bool use_mempool)
 {
 	struct ceph_osd_request *req;
-	struct ceph_osd_data *osd_data;
 	u64 objnum = 0;
 	u64 objoff = 0;
 	u64 objlen = 0;
@@ -561,8 +647,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 					GFP_NOFS);
 	if (!req)
 		return ERR_PTR(-ENOMEM);
-	osd_data = opcode == CEPH_OSD_OP_WRITE ? &req->r_data_out
-					       : &req->r_data_in;
 
 	req->r_flags = flags;
 
@@ -585,7 +669,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 
 	osd_req_op_extent_init(req, 0, opcode, objoff, objlen,
 				truncate_size, truncate_seq);
-	osd_req_op_extent_osd_data(req, 0, osd_data);
 
 	/*
 	 * A second op in the ops array means the caller wants to
@@ -2171,8 +2254,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 
 	/* it may be a short read due to an object boundary */
 
-	ceph_osd_data_pages_init(&req->r_data_in, pages, *plen, page_align,
-				false, false);
+	osd_req_op_extent_osd_data_pages(req, 0, false,
+				pages, *plen, page_align, false, false);
 
 	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
 		off, *plen, *plen, page_align);
@@ -2214,7 +2297,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
 		return PTR_ERR(req);
 
 	/* it may be a short write due to an object boundary */
-	ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align,
-				false, false);
+	osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align,
+				false, false);
 	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
 
@@ -2308,8 +2391,14 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	m = ceph_msg_get(req->r_reply);
 
 	if (data_len > 0) {
-		struct ceph_osd_data *osd_data = &req->r_data_in;
+		struct ceph_osd_data *osd_data;
 
+		/*
+		 * XXX This is assuming there is only one op containing
+		 * XXX page data.  Probably OK for reads, but this
+		 * XXX ought to be done more generally.
+		 */
+		osd_data = osd_req_op_extent_osd_data(req, 0, false);
 		if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
 			if (osd_data->pages &&
 			    unlikely(osd_data->length < data_len)) {