 fs/nfs/objlayout/objio_osd.c | 272 ++++++++++++++++++++++----------------------
 1 file changed, 128 insertions(+), 144 deletions(-)
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 2347e0ac63e6..bd7ec26e2840 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -38,7 +38,7 @@
  */
 
 #include <linux/module.h>
-#include <scsi/osd_initiator.h>
+#include <scsi/osd_ore.h>
 
 #include "objlayout.h"
 
@@ -52,7 +52,7 @@ enum { BIO_MAX_PAGES_KMALLOC =
 
 struct objio_dev_ent {
 	struct nfs4_deviceid_node id_node;
-	struct osd_dev *od;
+	struct ore_dev od;
 };
 
 static void
@@ -60,8 +60,8 @@ objio_free_deviceid_node(struct nfs4_deviceid_node *d)
 {
 	struct objio_dev_ent *de = container_of(d, struct objio_dev_ent, id_node);
 
-	dprintk("%s: free od=%p\n", __func__, de->od);
-	osduld_put_device(de->od);
+	dprintk("%s: free od=%p\n", __func__, de->od.od);
+	osduld_put_device(de->od.od);
 	kfree(de);
 }
 
@@ -98,12 +98,12 @@ _dev_list_add(const struct nfs_server *nfss,
 				nfss->pnfs_curr_ld,
 				nfss->nfs_client,
 				d_id);
-	de->od = od;
+	de->od.od = od;
 
 	d = nfs4_insert_deviceid_node(&de->id_node);
 	n = container_of(d, struct objio_dev_ent, id_node);
 	if (n != de) {
-		dprintk("%s: Race with other n->od=%p\n", __func__, n->od);
+		dprintk("%s: Race with other n->od=%p\n", __func__, n->od.od);
 		objio_free_deviceid_node(&de->id_node);
 		de = n;
 	}
@@ -111,28 +111,11 @@ _dev_list_add(const struct nfs_server *nfss,
 	return de;
 }
 
-struct caps_buffers {
-	u8 caps_key[OSD_CRYPTO_KEYID_SIZE];
-	u8 creds[OSD_CAP_LEN];
-};
-
 struct objio_segment {
 	struct pnfs_layout_segment lseg;
 
-	struct pnfs_osd_object_cred *comps;
-
-	unsigned mirrors_p1;
-	unsigned stripe_unit;
-	unsigned group_width;	/* Data stripe_units without integrity comps */
-	u64 group_depth;
-	unsigned group_count;
-
-	unsigned max_io_size;
-
-	unsigned comps_index;
-	unsigned num_comps;
-	/* variable length */
-	struct objio_dev_ent *ods[];
+	struct ore_layout layout;
+	struct ore_components oc;
 };
 
 static inline struct objio_segment *
@@ -155,7 +138,8 @@ struct objio_state {
 	loff_t offset;
 	bool sync;
 
-	struct objio_segment *layout;
+	struct ore_layout *layout;
+	struct ore_components *oc;
 
 	struct kref kref;
 	objio_done_fn done;
@@ -175,32 +159,33 @@ struct objio_state {
 
 /* Send and wait for a get_device_info of devices in the layout,
 	then look them up with the osd_initiator library */
-static struct objio_dev_ent *_device_lookup(struct pnfs_layout_hdr *pnfslay,
-	struct objio_segment *objio_seg, unsigned comp,
+static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay,
+	struct objio_segment *objio_seg, unsigned c, struct nfs4_deviceid *d_id,
 	gfp_t gfp_flags)
 {
 	struct pnfs_osd_deviceaddr *deviceaddr;
-	struct nfs4_deviceid *d_id;
 	struct objio_dev_ent *ode;
 	struct osd_dev *od;
 	struct osd_dev_info odi;
 	int err;
 
-	d_id = &objio_seg->comps[comp].oc_object_id.oid_device_id;
-
 	ode = _dev_list_find(NFS_SERVER(pnfslay->plh_inode), d_id);
-	if (ode)
-		return ode;
+	if (ode) {
+		objio_seg->oc.ods[c] = &ode->od; /* must use container_of */
+		return 0;
+	}
 
 	err = objlayout_get_deviceinfo(pnfslay, d_id, &deviceaddr, gfp_flags);
 	if (unlikely(err)) {
 		dprintk("%s: objlayout_get_deviceinfo dev(%llx:%llx) =>%d\n",
 			__func__, _DEVID_LO(d_id), _DEVID_HI(d_id), err);
-		return ERR_PTR(err);
+		return err;
 	}
 
 	odi.systemid_len = deviceaddr->oda_systemid.len;
 	if (odi.systemid_len > sizeof(odi.systemid)) {
+		dprintk("%s: odi.systemid_len > sizeof(systemid=%zd)\n",
+			__func__, sizeof(odi.systemid));
 		err = -EINVAL;
 		goto out;
 	} else if (odi.systemid_len)
@@ -225,38 +210,15 @@ static struct objio_dev_ent *_device_lookup(struct pnfs_layout_hdr *pnfslay,
 
 	ode = _dev_list_add(NFS_SERVER(pnfslay->plh_inode), d_id, od,
 			gfp_flags);
-
+	objio_seg->oc.ods[c] = &ode->od; /* must use container_of */
+	dprintk("Adding new dev_id(%llx:%llx)\n",
+		_DEVID_LO(d_id), _DEVID_HI(d_id));
 out:
-	dprintk("%s: return=%d\n", __func__, err);
 	objlayout_put_deviceinfo(deviceaddr);
-	return err ? ERR_PTR(err) : ode;
-}
-
-static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay,
-	struct objio_segment *objio_seg,
-	gfp_t gfp_flags)
-{
-	unsigned i;
-	int err;
-
-	/* lookup all devices */
-	for (i = 0; i < objio_seg->num_comps; i++) {
-		struct objio_dev_ent *ode;
-
-		ode = _device_lookup(pnfslay, objio_seg, i, gfp_flags);
-		if (unlikely(IS_ERR(ode))) {
-			err = PTR_ERR(ode);
-			goto out;
-		}
-		objio_seg->ods[i] = ode;
-	}
-	err = 0;
-
-out:
-	dprintk("%s: return=%d\n", __func__, err);
 	return err;
 }
 
+#if 0
 static int _verify_data_map(struct pnfs_osd_layout *layout)
 {
 	struct pnfs_osd_data_map *data_map = &layout->olo_map;
@@ -296,23 +258,45 @@ static int _verify_data_map(struct pnfs_osd_layout *layout)
 
 	return 0;
 }
+#endif
 
-static void copy_single_comp(struct pnfs_osd_object_cred *cur_comp,
-			     struct pnfs_osd_object_cred *src_comp,
-			     struct caps_buffers *caps_p)
+static void copy_single_comp(struct ore_components *oc, unsigned c,
+			     struct pnfs_osd_object_cred *src_comp)
 {
-	WARN_ON(src_comp->oc_cap_key.cred_len > sizeof(caps_p->caps_key));
-	WARN_ON(src_comp->oc_cap.cred_len > sizeof(caps_p->creds));
+	struct ore_comp *ocomp = &oc->comps[c];
 
-	*cur_comp = *src_comp;
+	WARN_ON(src_comp->oc_cap_key.cred_len > 0); /* libosd is NO_SEC only */
+	WARN_ON(src_comp->oc_cap.cred_len > sizeof(ocomp->cred));
 
-	memcpy(caps_p->caps_key, src_comp->oc_cap_key.cred,
-	       sizeof(caps_p->caps_key));
-	cur_comp->oc_cap_key.cred = caps_p->caps_key;
+	ocomp->obj.partition = src_comp->oc_object_id.oid_partition_id;
+	ocomp->obj.id = src_comp->oc_object_id.oid_object_id;
 
-	memcpy(caps_p->creds, src_comp->oc_cap.cred,
-	       sizeof(caps_p->creds));
-	cur_comp->oc_cap.cred = caps_p->creds;
+	memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred));
+}
+
+int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
+		       struct objio_segment **pseg)
+{
+	struct __alloc_objio_segment {
+		struct objio_segment olseg;
+		struct ore_dev *ods[numdevs];
+		struct ore_comp	comps[numdevs];
+	} *aolseg;
+
+	aolseg = kzalloc(sizeof(*aolseg), gfp_flags);
+	if (unlikely(!aolseg)) {
+		dprintk("%s: Faild allocation numdevs=%d size=%zd\n", __func__,
+			numdevs, sizeof(*aolseg));
+		return -ENOMEM;
+	}
+
+	aolseg->olseg.oc.numdevs = numdevs;
+	aolseg->olseg.oc.single_comp = EC_MULTPLE_COMPS;
+	aolseg->olseg.oc.comps = aolseg->comps;
+	aolseg->olseg.oc.ods = aolseg->ods;
+
+	*pseg = &aolseg->olseg;
+	return 0;
 }
 
 int objio_alloc_lseg(struct pnfs_layout_segment **outp,
@@ -324,59 +308,43 @@ int objio_alloc_lseg(struct pnfs_layout_segment **outp,
 	struct objio_segment *objio_seg;
 	struct pnfs_osd_xdr_decode_layout_iter iter;
 	struct pnfs_osd_layout layout;
-	struct pnfs_osd_object_cred *cur_comp, src_comp;
-	struct caps_buffers *caps_p;
+	struct pnfs_osd_object_cred src_comp;
+	unsigned cur_comp;
 	int err;
 
 	err = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
 	if (unlikely(err))
 		return err;
 
-	err = _verify_data_map(&layout);
+	err = __alloc_objio_seg(layout.olo_num_comps, gfp_flags, &objio_seg);
 	if (unlikely(err))
 		return err;
 
-	objio_seg = kzalloc(sizeof(*objio_seg) +
-		sizeof(objio_seg->ods[0]) * layout.olo_num_comps +
-		sizeof(*objio_seg->comps) * layout.olo_num_comps +
-		sizeof(struct caps_buffers) * layout.olo_num_comps,
-		gfp_flags);
-	if (!objio_seg)
-		return -ENOMEM;
+	objio_seg->layout.stripe_unit = layout.olo_map.odm_stripe_unit;
+	objio_seg->layout.group_width = layout.olo_map.odm_group_width;
+	objio_seg->layout.group_depth = layout.olo_map.odm_group_depth;
+	objio_seg->layout.mirrors_p1 = layout.olo_map.odm_mirror_cnt + 1;
+	objio_seg->layout.raid_algorithm = layout.olo_map.odm_raid_algorithm;
 
-	objio_seg->comps = (void *)(objio_seg->ods + layout.olo_num_comps);
-	cur_comp = objio_seg->comps;
-	caps_p = (void *)(cur_comp + layout.olo_num_comps);
-	while (pnfs_osd_xdr_decode_layout_comp(&src_comp, &iter, xdr, &err))
-		copy_single_comp(cur_comp++, &src_comp, caps_p++);
+	err = ore_verify_layout(layout.olo_map.odm_num_comps,
+				&objio_seg->layout);
 	if (unlikely(err))
 		goto err;
 
-	objio_seg->num_comps = layout.olo_num_comps;
-	objio_seg->comps_index = layout.olo_comps_index;
-	err = objio_devices_lookup(pnfslay, objio_seg, gfp_flags);
-	if (err)
-		goto err;
-
-	objio_seg->mirrors_p1 = layout.olo_map.odm_mirror_cnt + 1;
-	objio_seg->stripe_unit = layout.olo_map.odm_stripe_unit;
-	if (layout.olo_map.odm_group_width) {
-		objio_seg->group_width = layout.olo_map.odm_group_width;
-		objio_seg->group_depth = layout.olo_map.odm_group_depth;
-		objio_seg->group_count = layout.olo_map.odm_num_comps /
-						  objio_seg->mirrors_p1 /
-						  objio_seg->group_width;
-	} else {
-		objio_seg->group_width = layout.olo_map.odm_num_comps /
-						  objio_seg->mirrors_p1;
-		objio_seg->group_depth = -1;
-		objio_seg->group_count = 1;
+	objio_seg->oc.first_dev = layout.olo_comps_index;
+	cur_comp = 0;
+	while (pnfs_osd_xdr_decode_layout_comp(&src_comp, &iter, xdr, &err)) {
+		copy_single_comp(&objio_seg->oc, cur_comp, &src_comp);
+		err = objio_devices_lookup(pnfslay, objio_seg, cur_comp,
+					   &src_comp.oc_object_id.oid_device_id,
+					   gfp_flags);
+		if (err)
+			goto err;
+		++cur_comp;
 	}
-
-	/* Cache this calculation it will hit for every page */
-	objio_seg->max_io_size = (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE -
-				  objio_seg->stripe_unit) *
-				  objio_seg->group_width;
+	/* pnfs_osd_xdr_decode_layout_comp returns false on error */
+	if (unlikely(err))
+		goto err;
 
 	*outp = &objio_seg->lseg;
 	return 0;
@@ -393,10 +361,14 @@ void objio_free_lseg(struct pnfs_layout_segment *lseg)
 	int i;
 	struct objio_segment *objio_seg = OBJIO_LSEG(lseg);
 
-	for (i = 0; i < objio_seg->num_comps; i++) {
-		if (!objio_seg->ods[i])
+	for (i = 0; i < objio_seg->oc.numdevs; i++) {
+		struct ore_dev *od = objio_seg->oc.ods[i];
+		struct objio_dev_ent *ode;
+
+		if (!od)
 			break;
-		nfs4_put_deviceid_node(&objio_seg->ods[i]->id_node);
+		ode = container_of(od, typeof(*ode), od);
+		nfs4_put_deviceid_node(&ode->id_node);
 	}
 	kfree(objio_seg);
 }
@@ -411,8 +383,8 @@ objio_alloc_io_state(struct pnfs_layout_hdr *pnfs_layout_type,
 	struct objio_state *ios;
 	struct __alloc_objio_state {
 		struct objio_state objios;
-		struct _objio_per_comp per_dev[objio_seg->num_comps];
-		struct pnfs_osd_ioerr ioerrs[objio_seg->num_comps];
+		struct _objio_per_comp per_dev[objio_seg->oc.numdevs];
+		struct pnfs_osd_ioerr ioerrs[objio_seg->oc.numdevs];
 	} *aos;
 
 	aos = kzalloc(sizeof(*aos), gfp_flags);
@@ -421,8 +393,9 @@ objio_alloc_io_state(struct pnfs_layout_hdr *pnfs_layout_type,
 
 	ios = &aos->objios;
 
-	ios->layout = objio_seg;
-	objlayout_init_ioerrs(&aos->objios.oir, objio_seg->num_comps,
+	ios->layout = &objio_seg->layout;
+	ios->oc = &objio_seg->oc;
+	objlayout_init_ioerrs(&aos->objios.oir, objio_seg->oc.numdevs,
 			      aos->ioerrs, rpcdata, pnfs_layout_type);
 
 	ios->pages = pages;
@@ -474,6 +447,27 @@ enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
 	}
 }
 
+static void __on_dev_error(struct objio_state *ios, bool is_write,
+	struct ore_dev *od, unsigned dev_index, enum osd_err_priority oep,
+	u64 dev_offset, u64 dev_len)
+{
+	struct objio_state *objios = ios->private;
+	struct pnfs_osd_objid pooid;
+	struct objio_dev_ent *ode = container_of(od, typeof(*ode), od);
+	/* FIXME: what to do with more-then-one-group layouts. We need to
+	 * translate from ore_io_state index to oc->comps index
+	 */
+	unsigned comp = dev_index;
+
+	pooid.oid_device_id = ode->id_node.deviceid;
+	pooid.oid_partition_id = ios->oc->comps[comp].obj.partition;
+	pooid.oid_object_id = ios->oc->comps[comp].obj.id;
+
+	objlayout_io_set_result(&objios->oir, comp,
+				&pooid, osd_pri_2_pnfs_err(oep),
+				dev_offset, dev_len, is_write);
+}
+
 static void _clear_bio(struct bio *bio)
 {
 	struct bio_vec *bv;
@@ -518,12 +512,9 @@ static int _io_check(struct objio_state *ios, bool is_write)
 
 			continue; /* we recovered */
 		}
-		objlayout_io_set_result(&ios->oir, i,
-					&ios->layout->comps[i].oc_object_id,
-					osd_pri_2_pnfs_err(osi.osd_err_pri),
-					ios->per_dev[i].offset,
-					ios->per_dev[i].length,
-					is_write);
+		__on_dev_error(ios, is_write, ios->oc->ods[i],
+			       ios->per_dev[i].dev, osi.osd_err_pri,
+			       ios->per_dev[i].offset, ios->per_dev[i].length);
 
 		if (osi.osd_err_pri >= oep) {
 			oep = osi.osd_err_pri;
@@ -558,11 +549,11 @@ static void _io_free(struct objio_state *ios)
 
 struct osd_dev *_io_od(struct objio_state *ios, unsigned dev)
 {
-	unsigned min_dev = ios->layout->comps_index;
-	unsigned max_dev = min_dev + ios->layout->num_comps;
+	unsigned min_dev = ios->oc->first_dev;
+	unsigned max_dev = min_dev + ios->oc->numdevs;
 
 	BUG_ON(dev < min_dev || max_dev <= dev);
-	return ios->layout->ods[dev - min_dev]->od;
+	return ios->oc->ods[dev - min_dev]->od;
 }
 
 struct _striping_info {
@@ -820,12 +811,9 @@ static int _read_mirrors(struct objio_state *ios, unsigned cur_comp)
 	struct osd_request *or = NULL;
 	struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];
 	unsigned dev = per_dev->dev;
-	struct pnfs_osd_object_cred *cred =
-		&ios->layout->comps[cur_comp];
-	struct osd_obj_id obj = {
-		.partition = cred->oc_object_id.oid_partition_id,
-		.id = cred->oc_object_id.oid_object_id,
-	};
+	struct ore_comp *cred =
+		&ios->oc->comps[cur_comp];
+	struct osd_obj_id obj = cred->obj;
 	int ret;
 
 	or = osd_start_request(_io_od(ios, dev), GFP_KERNEL);
@@ -837,7 +825,7 @@ static int _read_mirrors(struct objio_state *ios, unsigned cur_comp)
 
 	osd_req_read(or, &obj, per_dev->offset, per_dev->bio, per_dev->length);
 
-	ret = osd_finalize_request(or, 0, cred->oc_cap.cred, NULL);
+	ret = osd_finalize_request(or, 0, cred->cred, NULL);
 	if (ret) {
 		dprintk("%s: Faild to osd_finalize_request() => %d\n",
 			__func__, ret);
@@ -924,12 +912,8 @@ static int _write_mirrors(struct objio_state *ios, unsigned cur_comp)
 
 	for (; cur_comp < last_comp; ++cur_comp, ++dev) {
 		struct osd_request *or = NULL;
-		struct pnfs_osd_object_cred *cred =
-			&ios->layout->comps[cur_comp];
-		struct osd_obj_id obj = {
-			.partition = cred->oc_object_id.oid_partition_id,
-			.id = cred->oc_object_id.oid_object_id,
-		};
+		struct ore_comp *cred = &ios->oc->comps[cur_comp];
+		struct osd_obj_id obj = cred->obj;
 		struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];
 		struct bio *bio;
 
@@ -964,7 +948,7 @@ static int _write_mirrors(struct objio_state *ios, unsigned cur_comp)
 
 		osd_req_write(or, &obj, per_dev->offset, bio, per_dev->length);
 
-		ret = osd_finalize_request(or, 0, cred->oc_cap.cred, NULL);
+		ret = osd_finalize_request(or, 0, cred->cred, NULL);
 		if (ret) {
 			dprintk("%s: Faild to osd_finalize_request() => %d\n",
 				__func__, ret);
@@ -1030,7 +1014,7 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
 		return false;
 
 	return pgio->pg_count + req->wb_bytes <=
-			OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
+			OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length;
 }
 
 static const struct nfs_pageio_ops objio_pg_read_ops = {
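
Note (added for reference, not part of the patch): the ORE types this driver now fills in live in include/scsi/osd_ore.h. The sketch below is reconstructed only from the fields this diff touches; member types, ordering, and OSD_CAP_LEN (carried over from the removed caps_buffers) are assumptions, and the header remains authoritative.

/* Sketch of the ORE types used above -- not a verbatim copy of osd_ore.h. */
struct ore_comp {
	struct osd_obj_id obj;     /* .partition / .id, filled by copy_single_comp() */
	u8 cred[OSD_CAP_LEN];      /* capability blob; length is an assumption */
};

struct ore_dev {
	struct osd_dev *od;        /* objio_dev_ent embeds this and container_of()s back */
};

struct ore_components {
	unsigned first_dev;        /* replaces objio_segment->comps_index */
	unsigned numdevs;          /* replaces objio_segment->num_comps */
	unsigned single_comp;      /* EC_MULTPLE_COMPS for this driver (enum in the header) */
	struct ore_comp *comps;
	struct ore_dev **ods;
};

struct ore_layout {
	unsigned stripe_unit;      /* the per-segment copies objio_alloc_lseg() */
	unsigned mirrors_p1;       /* used to keep now live here */
	unsigned group_width;
	u64 group_depth;
	unsigned raid_algorithm;   /* enum in the header */
	unsigned max_io_length;    /* set by ore_verify_layout(); replaces max_io_size */
};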