path: root/fs/nfs/objlayout
author		Ingo Molnar <mingo@elte.hu>	2011-06-04 06:13:06 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-06-04 06:13:06 -0400
commit		710054ba25c0d1f8f41c22ce13ba336503fb5318 (patch)
tree		f9b09b722bf511841539173d946f90a20fc2e59a /fs/nfs/objlayout
parent		74c355fbdfedd3820046dba4f537876cea54c207 (diff)
parent		b273fa9716aa1564bee88ceee62f9042981cdc81 (diff)
Merge branch 'perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent
Diffstat (limited to 'fs/nfs/objlayout')
-rw-r--r--	fs/nfs/objlayout/Kbuild			   5
-rw-r--r--	fs/nfs/objlayout/objio_osd.c		1057
-rw-r--r--	fs/nfs/objlayout/objlayout.c		 712
-rw-r--r--	fs/nfs/objlayout/objlayout.h		 187
-rw-r--r--	fs/nfs/objlayout/pnfs_osd_xdr_cli.c	 412
5 files changed, 2373 insertions, 0 deletions
diff --git a/fs/nfs/objlayout/Kbuild b/fs/nfs/objlayout/Kbuild
new file mode 100644
index 000000000000..ed30ea072bb8
--- /dev/null
+++ b/fs/nfs/objlayout/Kbuild
@@ -0,0 +1,5 @@
1#
2# Makefile for the pNFS Objects Layout Driver kernel module
3#
4objlayoutdriver-y := objio_osd.o pnfs_osd_xdr_cli.o objlayout.o
5obj-$(CONFIG_PNFS_OBJLAYOUT) += objlayoutdriver.o
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
new file mode 100644
index 000000000000..9cf208df1f25
--- /dev/null
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -0,0 +1,1057 @@
1/*
2 * pNFS Objects layout implementation over open-osd initiator library
3 *
4 * Copyright (C) 2009 Panasas Inc. [year of first publication]
5 * All rights reserved.
6 *
7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * See the file COPYING included with this distribution for more details.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 *
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. Neither the name of the Panasas company nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
34 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#include <linux/module.h>
41#include <scsi/osd_initiator.h>
42
43#include "objlayout.h"
44
45#define NFSDBG_FACILITY NFSDBG_PNFS_LD
46
47#define _LLU(x) ((unsigned long long)(x))
48
49enum { BIO_MAX_PAGES_KMALLOC =
50 (PAGE_SIZE - sizeof(struct bio)) / sizeof(struct bio_vec),
51};
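/*
 * Editor's note: a worked instance of the calculation above, using
 * purely illustrative sizes (not from any particular build). If
 * PAGE_SIZE were 4096, sizeof(struct bio) 96 and sizeof(struct
 * bio_vec) 16, one kmalloc'ed page holds the bio header plus
 * (4096 - 96) / 16 = 250 bio_vec entries, so bios built with
 * bio_kmalloc() below are capped at 250 pages each.
 */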
52
53struct objio_dev_ent {
54 struct nfs4_deviceid_node id_node;
55 struct osd_dev *od;
56};
57
58static void
59objio_free_deviceid_node(struct nfs4_deviceid_node *d)
60{
61 struct objio_dev_ent *de = container_of(d, struct objio_dev_ent, id_node);
62
63 dprintk("%s: free od=%p\n", __func__, de->od);
64 osduld_put_device(de->od);
65 kfree(de);
66}
67
68static struct objio_dev_ent *_dev_list_find(const struct nfs_server *nfss,
69 const struct nfs4_deviceid *d_id)
70{
71 struct nfs4_deviceid_node *d;
72 struct objio_dev_ent *de;
73
74 d = nfs4_find_get_deviceid(nfss->pnfs_curr_ld, nfss->nfs_client, d_id);
75 if (!d)
76 return NULL;
77
78 de = container_of(d, struct objio_dev_ent, id_node);
79 return de;
80}
81
82static struct objio_dev_ent *
83_dev_list_add(const struct nfs_server *nfss,
84 const struct nfs4_deviceid *d_id, struct osd_dev *od,
85 gfp_t gfp_flags)
86{
87 struct nfs4_deviceid_node *d;
88 struct objio_dev_ent *de = kzalloc(sizeof(*de), gfp_flags);
89 struct objio_dev_ent *n;
90
91 if (!de) {
92 dprintk("%s: -ENOMEM od=%p\n", __func__, od);
93 return NULL;
94 }
95
96 dprintk("%s: Adding od=%p\n", __func__, od);
97 nfs4_init_deviceid_node(&de->id_node,
98 nfss->pnfs_curr_ld,
99 nfss->nfs_client,
100 d_id);
101 de->od = od;
102
103 d = nfs4_insert_deviceid_node(&de->id_node);
104 n = container_of(d, struct objio_dev_ent, id_node);
105 if (n != de) {
106 dprintk("%s: Race with other n->od=%p\n", __func__, n->od);
107 objio_free_deviceid_node(&de->id_node);
108 de = n;
109 }
110
111 atomic_inc(&de->id_node.ref);
112 return de;
113}
114
115struct caps_buffers {
116 u8 caps_key[OSD_CRYPTO_KEYID_SIZE];
117 u8 creds[OSD_CAP_LEN];
118};
119
120struct objio_segment {
121 struct pnfs_layout_segment lseg;
122
123 struct pnfs_osd_object_cred *comps;
124
125 unsigned mirrors_p1;
126 unsigned stripe_unit;
127 unsigned group_width; /* Data stripe_units without integrity comps */
128 u64 group_depth;
129 unsigned group_count;
130
131 unsigned max_io_size;
132
133 unsigned comps_index;
134 unsigned num_comps;
135 /* variable length */
136 struct objio_dev_ent *ods[];
137};
138
139static inline struct objio_segment *
140OBJIO_LSEG(struct pnfs_layout_segment *lseg)
141{
142 return container_of(lseg, struct objio_segment, lseg);
143}
144
145struct objio_state;
146typedef ssize_t (*objio_done_fn)(struct objio_state *ios);
147
148struct objio_state {
149 /* Generic layer */
150 struct objlayout_io_state ol_state;
151
152 struct objio_segment *layout;
153
154 struct kref kref;
155 objio_done_fn done;
156 void *private;
157
158 unsigned long length;
159 unsigned numdevs; /* Actually used devs in this IO */
160 /* A per-device variable array of size numdevs */
161 struct _objio_per_comp {
162 struct bio *bio;
163 struct osd_request *or;
164 unsigned long length;
165 u64 offset;
166 unsigned dev;
167 } per_dev[];
168};
169
170/* Send and wait for a get_device_info for each device in the layout,
171   then look it up with the osd_initiator library */
172static struct objio_dev_ent *_device_lookup(struct pnfs_layout_hdr *pnfslay,
173 struct objio_segment *objio_seg, unsigned comp,
174 gfp_t gfp_flags)
175{
176 struct pnfs_osd_deviceaddr *deviceaddr;
177 struct nfs4_deviceid *d_id;
178 struct objio_dev_ent *ode;
179 struct osd_dev *od;
180 struct osd_dev_info odi;
181 int err;
182
183 d_id = &objio_seg->comps[comp].oc_object_id.oid_device_id;
184
185 ode = _dev_list_find(NFS_SERVER(pnfslay->plh_inode), d_id);
186 if (ode)
187 return ode;
188
189 err = objlayout_get_deviceinfo(pnfslay, d_id, &deviceaddr, gfp_flags);
190 if (unlikely(err)) {
191 dprintk("%s: objlayout_get_deviceinfo dev(%llx:%llx) =>%d\n",
192 __func__, _DEVID_LO(d_id), _DEVID_HI(d_id), err);
193 return ERR_PTR(err);
194 }
195
196 odi.systemid_len = deviceaddr->oda_systemid.len;
197 if (odi.systemid_len > sizeof(odi.systemid)) {
198 err = -EINVAL;
199 goto out;
200 } else if (odi.systemid_len)
201 memcpy(odi.systemid, deviceaddr->oda_systemid.data,
202 odi.systemid_len);
203 odi.osdname_len = deviceaddr->oda_osdname.len;
204 odi.osdname = (u8 *)deviceaddr->oda_osdname.data;
205
206 if (!odi.osdname_len && !odi.systemid_len) {
207 dprintk("%s: !odi.osdname_len && !odi.systemid_len\n",
208 __func__);
209 err = -ENODEV;
210 goto out;
211 }
212
213 od = osduld_info_lookup(&odi);
214 if (unlikely(IS_ERR(od))) {
215 err = PTR_ERR(od);
216 dprintk("%s: osduld_info_lookup => %d\n", __func__, err);
217 goto out;
218 }
219
220 ode = _dev_list_add(NFS_SERVER(pnfslay->plh_inode), d_id, od,
221 gfp_flags);
222
223out:
224 dprintk("%s: return=%d\n", __func__, err);
225 objlayout_put_deviceinfo(deviceaddr);
226 return err ? ERR_PTR(err) : ode;
227}
228
229static int objio_devices_lookup(struct pnfs_layout_hdr *pnfslay,
230 struct objio_segment *objio_seg,
231 gfp_t gfp_flags)
232{
233 unsigned i;
234 int err;
235
236 /* lookup all devices */
237 for (i = 0; i < objio_seg->num_comps; i++) {
238 struct objio_dev_ent *ode;
239
240 ode = _device_lookup(pnfslay, objio_seg, i, gfp_flags);
241 if (unlikely(IS_ERR(ode))) {
242 err = PTR_ERR(ode);
243 goto out;
244 }
245 objio_seg->ods[i] = ode;
246 }
247 err = 0;
248
249out:
250 dprintk("%s: return=%d\n", __func__, err);
251 return err;
252}
253
254static int _verify_data_map(struct pnfs_osd_layout *layout)
255{
256 struct pnfs_osd_data_map *data_map = &layout->olo_map;
257 u64 stripe_length;
258 u32 group_width;
259
260/* FIXME: Only RAID0 for now. Otherwise, fall back to I/O through the MDS. */
261 if (data_map->odm_raid_algorithm != PNFS_OSD_RAID_0) {
262 printk(KERN_ERR "Only RAID_0 for now\n");
263 return -ENOTSUPP;
264 }
265 if (0 != (data_map->odm_num_comps % (data_map->odm_mirror_cnt + 1))) {
266 printk(KERN_ERR "Data Map wrong, num_comps=%u mirrors=%u\n",
267 data_map->odm_num_comps, data_map->odm_mirror_cnt);
268 return -EINVAL;
269 }
270
271 if (data_map->odm_group_width)
272 group_width = data_map->odm_group_width;
273 else
274 group_width = data_map->odm_num_comps /
275 (data_map->odm_mirror_cnt + 1);
276
277 stripe_length = (u64)data_map->odm_stripe_unit * group_width;
278 if (stripe_length >= (1ULL << 32)) {
279		printk(KERN_ERR "Total stripe length (0x%llx)"
280		       " >= 2^32 is not supported\n", _LLU(stripe_length));
281 return -ENOTSUPP;
282 }
283
284 if (0 != (data_map->odm_stripe_unit & ~PAGE_MASK)) {
285 printk(KERN_ERR "Stripe Unit(0x%llx)"
286		       " must be a multiple of PAGE_SIZE (0x%lx)\n",
287 _LLU(data_map->odm_stripe_unit), PAGE_SIZE);
288 return -ENOTSUPP;
289 }
290
291 return 0;
292}
293
294static void copy_single_comp(struct pnfs_osd_object_cred *cur_comp,
295 struct pnfs_osd_object_cred *src_comp,
296 struct caps_buffers *caps_p)
297{
298 WARN_ON(src_comp->oc_cap_key.cred_len > sizeof(caps_p->caps_key));
299 WARN_ON(src_comp->oc_cap.cred_len > sizeof(caps_p->creds));
300
301 *cur_comp = *src_comp;
302
303 memcpy(caps_p->caps_key, src_comp->oc_cap_key.cred,
304 sizeof(caps_p->caps_key));
305 cur_comp->oc_cap_key.cred = caps_p->caps_key;
306
307 memcpy(caps_p->creds, src_comp->oc_cap.cred,
308 sizeof(caps_p->creds));
309 cur_comp->oc_cap.cred = caps_p->creds;
310}
311
312int objio_alloc_lseg(struct pnfs_layout_segment **outp,
313 struct pnfs_layout_hdr *pnfslay,
314 struct pnfs_layout_range *range,
315 struct xdr_stream *xdr,
316 gfp_t gfp_flags)
317{
318 struct objio_segment *objio_seg;
319 struct pnfs_osd_xdr_decode_layout_iter iter;
320 struct pnfs_osd_layout layout;
321 struct pnfs_osd_object_cred *cur_comp, src_comp;
322 struct caps_buffers *caps_p;
323 int err;
324
325 err = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
326 if (unlikely(err))
327 return err;
328
329 err = _verify_data_map(&layout);
330 if (unlikely(err))
331 return err;
332
333 objio_seg = kzalloc(sizeof(*objio_seg) +
334 sizeof(objio_seg->ods[0]) * layout.olo_num_comps +
335 sizeof(*objio_seg->comps) * layout.olo_num_comps +
336 sizeof(struct caps_buffers) * layout.olo_num_comps,
337 gfp_flags);
338 if (!objio_seg)
339 return -ENOMEM;
340
341 objio_seg->comps = (void *)(objio_seg->ods + layout.olo_num_comps);
342 cur_comp = objio_seg->comps;
343 caps_p = (void *)(cur_comp + layout.olo_num_comps);
344 while (pnfs_osd_xdr_decode_layout_comp(&src_comp, &iter, xdr, &err))
345 copy_single_comp(cur_comp++, &src_comp, caps_p++);
346 if (unlikely(err))
347 goto err;
348
349 objio_seg->num_comps = layout.olo_num_comps;
350 objio_seg->comps_index = layout.olo_comps_index;
351 err = objio_devices_lookup(pnfslay, objio_seg, gfp_flags);
352 if (err)
353 goto err;
354
355 objio_seg->mirrors_p1 = layout.olo_map.odm_mirror_cnt + 1;
356 objio_seg->stripe_unit = layout.olo_map.odm_stripe_unit;
357 if (layout.olo_map.odm_group_width) {
358 objio_seg->group_width = layout.olo_map.odm_group_width;
359 objio_seg->group_depth = layout.olo_map.odm_group_depth;
360 objio_seg->group_count = layout.olo_map.odm_num_comps /
361 objio_seg->mirrors_p1 /
362 objio_seg->group_width;
363 } else {
364 objio_seg->group_width = layout.olo_map.odm_num_comps /
365 objio_seg->mirrors_p1;
366 objio_seg->group_depth = -1;
367 objio_seg->group_count = 1;
368 }
369
370	/* Cache this calculation; it will be needed for every page of I/O */
371 objio_seg->max_io_size = (BIO_MAX_PAGES_KMALLOC * PAGE_SIZE -
372 objio_seg->stripe_unit) *
373 objio_seg->group_width;
374
375 *outp = &objio_seg->lseg;
376 return 0;
377
378err:
379 kfree(objio_seg);
380 dprintk("%s: Error: return %d\n", __func__, err);
381 *outp = NULL;
382 return err;
383}
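/*
 * Editor's sketch of the single allocation built above, with
 * illustrative numbers: for olo_num_comps = 4, the kzalloc() carves
 * one block into
 *
 *	[struct objio_segment + ods[0..3]][comps[0..3]][caps_buffers[0..3]]
 *
 * comps points just past the ods[] flexible array, and each
 * caps_buffers slot is the backing store copy_single_comp() rewires
 * the decoded credentials into. The cached max_io_size then bounds a
 * single layout I/O: e.g. with 250-page bios, 4K pages, a 4K
 * stripe_unit and group_width = 2, (250 * 4096 - 4096) * 2 is just
 * under 2MB.
 */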
384
385void objio_free_lseg(struct pnfs_layout_segment *lseg)
386{
387 int i;
388 struct objio_segment *objio_seg = OBJIO_LSEG(lseg);
389
390 for (i = 0; i < objio_seg->num_comps; i++) {
391 if (!objio_seg->ods[i])
392 break;
393 nfs4_put_deviceid_node(&objio_seg->ods[i]->id_node);
394 }
395 kfree(objio_seg);
396}
397
398int objio_alloc_io_state(struct pnfs_layout_segment *lseg,
399 struct objlayout_io_state **outp,
400 gfp_t gfp_flags)
401{
402 struct objio_segment *objio_seg = OBJIO_LSEG(lseg);
403 struct objio_state *ios;
404 const unsigned first_size = sizeof(*ios) +
405 objio_seg->num_comps * sizeof(ios->per_dev[0]);
406 const unsigned sec_size = objio_seg->num_comps *
407 sizeof(ios->ol_state.ioerrs[0]);
408
409 ios = kzalloc(first_size + sec_size, gfp_flags);
410 if (unlikely(!ios))
411 return -ENOMEM;
412
413 ios->layout = objio_seg;
414 ios->ol_state.ioerrs = ((void *)ios) + first_size;
415 ios->ol_state.num_comps = objio_seg->num_comps;
416
417 *outp = &ios->ol_state;
418 return 0;
419}
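/*
 * Editor's sketch: the io_state is likewise a single allocation. With
 * num_comps = 4 (illustrative), the kzalloc() above yields
 *
 *	[struct objio_state + per_dev[0..3]][ioerrs[0..3]]
 *
 * and ol_state.ioerrs is pointed at the tail, so the generic layer's
 * error array needs no separate allocation or free.
 */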
420
421void objio_free_io_state(struct objlayout_io_state *ol_state)
422{
423 struct objio_state *ios = container_of(ol_state, struct objio_state,
424 ol_state);
425
426 kfree(ios);
427}
428
429static enum pnfs_osd_errno osd_pri_2_pnfs_err(enum osd_err_priority oep)
430{
431 switch (oep) {
432 case OSD_ERR_PRI_NO_ERROR:
433 return (enum pnfs_osd_errno)0;
434
435 case OSD_ERR_PRI_CLEAR_PAGES:
436 BUG_ON(1);
437 return 0;
438
439 case OSD_ERR_PRI_RESOURCE:
440 return PNFS_OSD_ERR_RESOURCE;
441 case OSD_ERR_PRI_BAD_CRED:
442 return PNFS_OSD_ERR_BAD_CRED;
443 case OSD_ERR_PRI_NO_ACCESS:
444 return PNFS_OSD_ERR_NO_ACCESS;
445 case OSD_ERR_PRI_UNREACHABLE:
446 return PNFS_OSD_ERR_UNREACHABLE;
447 case OSD_ERR_PRI_NOT_FOUND:
448 return PNFS_OSD_ERR_NOT_FOUND;
449 case OSD_ERR_PRI_NO_SPACE:
450 return PNFS_OSD_ERR_NO_SPACE;
451 default:
452 WARN_ON(1);
453 /* fallthrough */
454 case OSD_ERR_PRI_EIO:
455 return PNFS_OSD_ERR_EIO;
456 }
457}
458
459static void _clear_bio(struct bio *bio)
460{
461 struct bio_vec *bv;
462 unsigned i;
463
464 __bio_for_each_segment(bv, bio, i, 0) {
465 unsigned this_count = bv->bv_len;
466
467 if (likely(PAGE_SIZE == this_count))
468 clear_highpage(bv->bv_page);
469 else
470 zero_user(bv->bv_page, bv->bv_offset, this_count);
471 }
472}
473
474static int _io_check(struct objio_state *ios, bool is_write)
475{
476 enum osd_err_priority oep = OSD_ERR_PRI_NO_ERROR;
477 int lin_ret = 0;
478 int i;
479
480 for (i = 0; i < ios->numdevs; i++) {
481 struct osd_sense_info osi;
482 struct osd_request *or = ios->per_dev[i].or;
483 unsigned dev;
484 int ret;
485
486 if (!or)
487 continue;
488
489 ret = osd_req_decode_sense(or, &osi);
490 if (likely(!ret))
491 continue;
492
493 if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
494			/* the read start offset is past the end of file */
495 BUG_ON(is_write);
496 _clear_bio(ios->per_dev[i].bio);
497			dprintk("%s: read start offset past end of file "
498 "offset=0x%llx, length=0x%lx\n", __func__,
499 _LLU(ios->per_dev[i].offset),
500 ios->per_dev[i].length);
501
502 continue; /* we recovered */
503 }
504 dev = ios->per_dev[i].dev;
505 objlayout_io_set_result(&ios->ol_state, dev,
506 &ios->layout->comps[dev].oc_object_id,
507 osd_pri_2_pnfs_err(osi.osd_err_pri),
508 ios->per_dev[i].offset,
509 ios->per_dev[i].length,
510 is_write);
511
512 if (osi.osd_err_pri >= oep) {
513 oep = osi.osd_err_pri;
514 lin_ret = ret;
515 }
516 }
517
518 return lin_ret;
519}
520
521/*
522 * Common IO state helpers.
523 */
524static void _io_free(struct objio_state *ios)
525{
526 unsigned i;
527
528 for (i = 0; i < ios->numdevs; i++) {
529 struct _objio_per_comp *per_dev = &ios->per_dev[i];
530
531 if (per_dev->or) {
532 osd_end_request(per_dev->or);
533 per_dev->or = NULL;
534 }
535
536 if (per_dev->bio) {
537 bio_put(per_dev->bio);
538 per_dev->bio = NULL;
539 }
540 }
541}
542
543static struct osd_dev *_io_od(struct objio_state *ios, unsigned dev)
544{
545 unsigned min_dev = ios->layout->comps_index;
546 unsigned max_dev = min_dev + ios->layout->num_comps;
547
548 BUG_ON(dev < min_dev || max_dev <= dev);
549 return ios->layout->ods[dev - min_dev]->od;
550}
551
552struct _striping_info {
553 u64 obj_offset;
554 u64 group_length;
555 unsigned dev;
556 unsigned unit_off;
557};
558
559static void _calc_stripe_info(struct objio_state *ios, u64 file_offset,
560 struct _striping_info *si)
561{
562 u32 stripe_unit = ios->layout->stripe_unit;
563 u32 group_width = ios->layout->group_width;
564 u64 group_depth = ios->layout->group_depth;
565 u32 U = stripe_unit * group_width;
566
567 u64 T = U * group_depth;
568 u64 S = T * ios->layout->group_count;
569 u64 M = div64_u64(file_offset, S);
570
571 /*
572 G = (L - (M * S)) / T
573 H = (L - (M * S)) % T
574 */
575 u64 LmodU = file_offset - M * S;
576 u32 G = div64_u64(LmodU, T);
577 u64 H = LmodU - G * T;
578
579 u32 N = div_u64(H, U);
580
581 div_u64_rem(file_offset, stripe_unit, &si->unit_off);
582 si->obj_offset = si->unit_off + (N * stripe_unit) +
583 (M * group_depth * stripe_unit);
584
585 /* "H - (N * U)" is just "H % U" so it's bound to u32 */
586 si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
587 si->dev *= ios->layout->mirrors_p1;
588
589 si->group_length = T - H;
590}
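/*
 * Editor's worked example of the math above, under hypothetical layout
 * parameters: stripe_unit = 4K, group_width = 2, group_depth = 4,
 * group_count = 2, mirrors_p1 = 1. Then U = 8K, T = 32K, S = 64K.
 * For file_offset = 68K: M = 1, LmodU = 4K, G = 0, H = 4K, N = 0 and
 * unit_off = 0, giving obj_offset = 0 + 0 + 1 * 4 * 4K = 16K,
 * dev = (4K - 0) / 4K + 0 = 1 and group_length = T - H = 28K: the I/O
 * starts on the second component at object offset 16K, with 28K left
 * in this group.
 */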
591
592static int _add_stripe_unit(struct objio_state *ios, unsigned *cur_pg,
593 unsigned pgbase, struct _objio_per_comp *per_dev, int cur_len,
594 gfp_t gfp_flags)
595{
596 unsigned pg = *cur_pg;
597 struct request_queue *q =
598 osd_request_queue(_io_od(ios, per_dev->dev));
599
600 per_dev->length += cur_len;
601
602 if (per_dev->bio == NULL) {
603 unsigned stripes = ios->layout->num_comps /
604 ios->layout->mirrors_p1;
605 unsigned pages_in_stripe = stripes *
606 (ios->layout->stripe_unit / PAGE_SIZE);
607 unsigned bio_size = (ios->ol_state.nr_pages + pages_in_stripe) /
608 stripes;
609
610 if (BIO_MAX_PAGES_KMALLOC < bio_size)
611 bio_size = BIO_MAX_PAGES_KMALLOC;
612
613 per_dev->bio = bio_kmalloc(gfp_flags, bio_size);
614 if (unlikely(!per_dev->bio)) {
615			dprintk("Failed to allocate BIO size=%u\n", bio_size);
616 return -ENOMEM;
617 }
618 }
619
620 while (cur_len > 0) {
621 unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
622 unsigned added_len;
623
624 BUG_ON(ios->ol_state.nr_pages <= pg);
625 cur_len -= pglen;
626
627 added_len = bio_add_pc_page(q, per_dev->bio,
628 ios->ol_state.pages[pg], pglen, pgbase);
629 if (unlikely(pglen != added_len))
630 return -ENOMEM;
631 pgbase = 0;
632 ++pg;
633 }
634 BUG_ON(cur_len);
635
636 *cur_pg = pg;
637 return 0;
638}
639
640static int _prepare_one_group(struct objio_state *ios, u64 length,
641 struct _striping_info *si, unsigned *last_pg,
642 gfp_t gfp_flags)
643{
644 unsigned stripe_unit = ios->layout->stripe_unit;
645 unsigned mirrors_p1 = ios->layout->mirrors_p1;
646 unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
647 unsigned dev = si->dev;
648 unsigned first_dev = dev - (dev % devs_in_group);
649 unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
650 unsigned cur_pg = *last_pg;
651 int ret = 0;
652
653 while (length) {
654 struct _objio_per_comp *per_dev = &ios->per_dev[dev];
655 unsigned cur_len, page_off = 0;
656
657 if (!per_dev->length) {
658 per_dev->dev = dev;
659 if (dev < si->dev) {
660 per_dev->offset = si->obj_offset + stripe_unit -
661 si->unit_off;
662 cur_len = stripe_unit;
663 } else if (dev == si->dev) {
664 per_dev->offset = si->obj_offset;
665 cur_len = stripe_unit - si->unit_off;
666 page_off = si->unit_off & ~PAGE_MASK;
667 BUG_ON(page_off &&
668 (page_off != ios->ol_state.pgbase));
669 } else { /* dev > si->dev */
670 per_dev->offset = si->obj_offset - si->unit_off;
671 cur_len = stripe_unit;
672 }
673
674 if (max_comp < dev)
675 max_comp = dev;
676 } else {
677 cur_len = stripe_unit;
678 }
679 if (cur_len >= length)
680 cur_len = length;
681
682		ret = _add_stripe_unit(ios, &cur_pg, page_off, per_dev,
683 cur_len, gfp_flags);
684 if (unlikely(ret))
685 goto out;
686
687 dev += mirrors_p1;
688 dev = (dev % devs_in_group) + first_dev;
689
690 length -= cur_len;
691 ios->length += cur_len;
692 }
693out:
694 ios->numdevs = max_comp + mirrors_p1;
695 *last_pg = cur_pg;
696 return ret;
697}
698
699static int _io_rw_pagelist(struct objio_state *ios, gfp_t gfp_flags)
700{
701 u64 length = ios->ol_state.count;
702 u64 offset = ios->ol_state.offset;
703 struct _striping_info si;
704 unsigned last_pg = 0;
705 int ret = 0;
706
707 while (length) {
708 _calc_stripe_info(ios, offset, &si);
709
710 if (length < si.group_length)
711 si.group_length = length;
712
713 ret = _prepare_one_group(ios, si.group_length, &si, &last_pg, gfp_flags);
714 if (unlikely(ret))
715 goto out;
716
717 offset += si.group_length;
718 length -= si.group_length;
719 }
720
721out:
722 if (!ios->length)
723 return ret;
724
725 return 0;
726}
727
728static ssize_t _sync_done(struct objio_state *ios)
729{
730 struct completion *waiting = ios->private;
731
732 complete(waiting);
733 return 0;
734}
735
736static void _last_io(struct kref *kref)
737{
738 struct objio_state *ios = container_of(kref, struct objio_state, kref);
739
740 ios->done(ios);
741}
742
743static void _done_io(struct osd_request *or, void *p)
744{
745 struct objio_state *ios = p;
746
747 kref_put(&ios->kref, _last_io);
748}
749
750static ssize_t _io_exec(struct objio_state *ios)
751{
752 DECLARE_COMPLETION_ONSTACK(wait);
753 ssize_t status = 0; /* sync status */
754 unsigned i;
755 objio_done_fn saved_done_fn = ios->done;
756 bool sync = ios->ol_state.sync;
757
758 if (sync) {
759 ios->done = _sync_done;
760 ios->private = &wait;
761 }
762
763 kref_init(&ios->kref);
764
765 for (i = 0; i < ios->numdevs; i++) {
766 struct osd_request *or = ios->per_dev[i].or;
767
768 if (!or)
769 continue;
770
771 kref_get(&ios->kref);
772 osd_execute_request_async(or, _done_io, ios);
773 }
774
775 kref_put(&ios->kref, _last_io);
776
777 if (sync) {
778 wait_for_completion(&wait);
779 status = saved_done_fn(ios);
780 }
781
782 return status;
783}
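/*
 * Editor's note on the kref scheme above: the count starts at 1 from
 * kref_init(), each submitted request holds one reference, and the
 * submitter drops the initial reference once the loop is done, e.g.
 * for numdevs = 3 (illustrative):
 *
 *	kref_init()                     -> 1
 *	3 x kref_get() + submit         -> 4
 *	kref_put() by the submitter     -> 3
 *	kref_put() in each _done_io()   -> 2 -> 1 -> 0, _last_io()
 *
 * so ios->done() fires exactly once, from whichever context drops the
 * last reference.
 */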
784
785/*
786 * read
787 */
788static ssize_t _read_done(struct objio_state *ios)
789{
790 ssize_t status;
791 int ret = _io_check(ios, false);
792
793 _io_free(ios);
794
795 if (likely(!ret))
796 status = ios->length;
797 else
798 status = ret;
799
800 objlayout_read_done(&ios->ol_state, status, ios->ol_state.sync);
801 return status;
802}
803
804static int _read_mirrors(struct objio_state *ios, unsigned cur_comp)
805{
806 struct osd_request *or = NULL;
807 struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];
808 unsigned dev = per_dev->dev;
809 struct pnfs_osd_object_cred *cred =
810 &ios->layout->comps[dev];
811 struct osd_obj_id obj = {
812 .partition = cred->oc_object_id.oid_partition_id,
813 .id = cred->oc_object_id.oid_object_id,
814 };
815 int ret;
816
817 or = osd_start_request(_io_od(ios, dev), GFP_KERNEL);
818 if (unlikely(!or)) {
819 ret = -ENOMEM;
820 goto err;
821 }
822 per_dev->or = or;
823
824 osd_req_read(or, &obj, per_dev->offset, per_dev->bio, per_dev->length);
825
826 ret = osd_finalize_request(or, 0, cred->oc_cap.cred, NULL);
827 if (ret) {
828		dprintk("%s: Failed to osd_finalize_request() => %d\n",
829 __func__, ret);
830 goto err;
831 }
832
833 dprintk("%s:[%d] dev=%d obj=0x%llx start=0x%llx length=0x%lx\n",
834 __func__, cur_comp, dev, obj.id, _LLU(per_dev->offset),
835 per_dev->length);
836
837err:
838 return ret;
839}
840
841static ssize_t _read_exec(struct objio_state *ios)
842{
843 unsigned i;
844 int ret;
845
846 for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
847 if (!ios->per_dev[i].length)
848 continue;
849 ret = _read_mirrors(ios, i);
850 if (unlikely(ret))
851 goto err;
852 }
853
854 ios->done = _read_done;
855 return _io_exec(ios); /* In sync mode exec returns the io status */
856
857err:
858 _io_free(ios);
859 return ret;
860}
861
862ssize_t objio_read_pagelist(struct objlayout_io_state *ol_state)
863{
864 struct objio_state *ios = container_of(ol_state, struct objio_state,
865 ol_state);
866 int ret;
867
868 ret = _io_rw_pagelist(ios, GFP_KERNEL);
869 if (unlikely(ret))
870 return ret;
871
872 return _read_exec(ios);
873}
874
875/*
876 * write
877 */
878static ssize_t _write_done(struct objio_state *ios)
879{
880 ssize_t status;
881 int ret = _io_check(ios, true);
882
883 _io_free(ios);
884
885 if (likely(!ret)) {
886 /* FIXME: should be based on the OSD's persistence model
887 * See OSD2r05 Section 4.13 Data persistence model */
888 ios->ol_state.committed = NFS_FILE_SYNC;
889 status = ios->length;
890 } else {
891 status = ret;
892 }
893
894 objlayout_write_done(&ios->ol_state, status, ios->ol_state.sync);
895 return status;
896}
897
898static int _write_mirrors(struct objio_state *ios, unsigned cur_comp)
899{
900 struct _objio_per_comp *master_dev = &ios->per_dev[cur_comp];
901 unsigned dev = ios->per_dev[cur_comp].dev;
902 unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
903 int ret;
904
905 for (; cur_comp < last_comp; ++cur_comp, ++dev) {
906 struct osd_request *or = NULL;
907 struct pnfs_osd_object_cred *cred =
908 &ios->layout->comps[dev];
909 struct osd_obj_id obj = {
910 .partition = cred->oc_object_id.oid_partition_id,
911 .id = cred->oc_object_id.oid_object_id,
912 };
913 struct _objio_per_comp *per_dev = &ios->per_dev[cur_comp];
914 struct bio *bio;
915
916 or = osd_start_request(_io_od(ios, dev), GFP_NOFS);
917 if (unlikely(!or)) {
918 ret = -ENOMEM;
919 goto err;
920 }
921 per_dev->or = or;
922
923 if (per_dev != master_dev) {
924 bio = bio_kmalloc(GFP_NOFS,
925 master_dev->bio->bi_max_vecs);
926 if (unlikely(!bio)) {
927				dprintk("Failed to allocate BIO size=%u\n",
928 master_dev->bio->bi_max_vecs);
929 ret = -ENOMEM;
930 goto err;
931 }
932
933 __bio_clone(bio, master_dev->bio);
934 bio->bi_bdev = NULL;
935 bio->bi_next = NULL;
936 per_dev->bio = bio;
937 per_dev->dev = dev;
938 per_dev->length = master_dev->length;
939 per_dev->offset = master_dev->offset;
940 } else {
941 bio = master_dev->bio;
942 bio->bi_rw |= REQ_WRITE;
943 }
944
945 osd_req_write(or, &obj, per_dev->offset, bio, per_dev->length);
946
947 ret = osd_finalize_request(or, 0, cred->oc_cap.cred, NULL);
948 if (ret) {
949			dprintk("%s: Failed to osd_finalize_request() => %d\n",
950 __func__, ret);
951 goto err;
952 }
953
954 dprintk("%s:[%d] dev=%d obj=0x%llx start=0x%llx length=0x%lx\n",
955 __func__, cur_comp, dev, obj.id, _LLU(per_dev->offset),
956 per_dev->length);
957 }
958
959err:
960 return ret;
961}
962
963static ssize_t _write_exec(struct objio_state *ios)
964{
965 unsigned i;
966 int ret;
967
968 for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
969 if (!ios->per_dev[i].length)
970 continue;
971 ret = _write_mirrors(ios, i);
972 if (unlikely(ret))
973 goto err;
974 }
975
976 ios->done = _write_done;
977 return _io_exec(ios); /* In sync mode exec returns the io->status */
978
979err:
980 _io_free(ios);
981 return ret;
982}
983
984ssize_t objio_write_pagelist(struct objlayout_io_state *ol_state, bool stable)
985{
986 struct objio_state *ios = container_of(ol_state, struct objio_state,
987 ol_state);
988 int ret;
989
990 /* TODO: ios->stable = stable; */
991 ret = _io_rw_pagelist(ios, GFP_NOFS);
992 if (unlikely(ret))
993 return ret;
994
995 return _write_exec(ios);
996}
997
998static bool objio_pg_test(struct nfs_pageio_descriptor *pgio,
999 struct nfs_page *prev, struct nfs_page *req)
1000{
1001 if (!pnfs_generic_pg_test(pgio, prev, req))
1002 return false;
1003
1004 return pgio->pg_count + req->wb_bytes <=
1005 OBJIO_LSEG(pgio->pg_lseg)->max_io_size;
1006}
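/*
 * Editor's sketch: with the roughly 2MB max_io_size from the example
 * in objio_alloc_lseg() above, pg_test keeps letting the page
 * coalescer grow a request until pg_count + wb_bytes would cross that
 * bound, at which point a new pageio descriptor is started.
 */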
1007
1008static struct pnfs_layoutdriver_type objlayout_type = {
1009 .id = LAYOUT_OSD2_OBJECTS,
1010 .name = "LAYOUT_OSD2_OBJECTS",
1011 .flags = PNFS_LAYOUTRET_ON_SETATTR,
1012
1013 .alloc_layout_hdr = objlayout_alloc_layout_hdr,
1014 .free_layout_hdr = objlayout_free_layout_hdr,
1015
1016 .alloc_lseg = objlayout_alloc_lseg,
1017 .free_lseg = objlayout_free_lseg,
1018
1019 .read_pagelist = objlayout_read_pagelist,
1020 .write_pagelist = objlayout_write_pagelist,
1021 .pg_test = objio_pg_test,
1022
1023 .free_deviceid_node = objio_free_deviceid_node,
1024
1025 .encode_layoutcommit = objlayout_encode_layoutcommit,
1026 .encode_layoutreturn = objlayout_encode_layoutreturn,
1027};
1028
1029MODULE_DESCRIPTION("pNFS Layout Driver for OSD2 objects");
1030MODULE_AUTHOR("Benny Halevy <bhalevy@panasas.com>");
1031MODULE_LICENSE("GPL");
1032
1033static int __init
1034objlayout_init(void)
1035{
1036 int ret = pnfs_register_layoutdriver(&objlayout_type);
1037
1038 if (ret)
1039 printk(KERN_INFO
1040 "%s: Registering OSD pNFS Layout Driver failed: error=%d\n",
1041 __func__, ret);
1042 else
1043 printk(KERN_INFO "%s: Registered OSD pNFS Layout Driver\n",
1044 __func__);
1045 return ret;
1046}
1047
1048static void __exit
1049objlayout_exit(void)
1050{
1051 pnfs_unregister_layoutdriver(&objlayout_type);
1052 printk(KERN_INFO "%s: Unregistered OSD pNFS Layout Driver\n",
1053 __func__);
1054}
1055
1056module_init(objlayout_init);
1057module_exit(objlayout_exit);
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
new file mode 100644
index 000000000000..dc3956c0de80
--- /dev/null
+++ b/fs/nfs/objlayout/objlayout.c
@@ -0,0 +1,712 @@
1/*
2 * pNFS Objects layout driver high level definitions
3 *
4 * Copyright (C) 2007 Panasas Inc. [year of first publication]
5 * All rights reserved.
6 *
7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * See the file COPYING included with this distribution for more details.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 *
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. Neither the name of the Panasas company nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
34 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#include <scsi/osd_initiator.h>
41#include "objlayout.h"
42
43#define NFSDBG_FACILITY NFSDBG_PNFS_LD
44/*
45 * Create an objlayout layout structure for the given inode and return it.
46 */
47struct pnfs_layout_hdr *
48objlayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
49{
50 struct objlayout *objlay;
51
52 objlay = kzalloc(sizeof(struct objlayout), gfp_flags);
53 if (objlay) {
54 spin_lock_init(&objlay->lock);
55 INIT_LIST_HEAD(&objlay->err_list);
56 }
57 dprintk("%s: Return %p\n", __func__, objlay);
58 return &objlay->pnfs_layout;
59}
60
61/*
62 * Free an objlayout layout structure
63 */
64void
65objlayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
66{
67 struct objlayout *objlay = OBJLAYOUT(lo);
68
69 dprintk("%s: objlay %p\n", __func__, objlay);
70
71 WARN_ON(!list_empty(&objlay->err_list));
72 kfree(objlay);
73}
74
75/*
76 * Unmarshal the layout and store it in pnfslay.
77 */
78struct pnfs_layout_segment *
79objlayout_alloc_lseg(struct pnfs_layout_hdr *pnfslay,
80 struct nfs4_layoutget_res *lgr,
81 gfp_t gfp_flags)
82{
83 int status = -ENOMEM;
84 struct xdr_stream stream;
85 struct xdr_buf buf = {
86 .pages = lgr->layoutp->pages,
87 .page_len = lgr->layoutp->len,
88 .buflen = lgr->layoutp->len,
89 .len = lgr->layoutp->len,
90 };
91 struct page *scratch;
92 struct pnfs_layout_segment *lseg;
93
94 dprintk("%s: Begin pnfslay %p\n", __func__, pnfslay);
95
96 scratch = alloc_page(gfp_flags);
97 if (!scratch)
98 goto err_nofree;
99
100 xdr_init_decode(&stream, &buf, NULL);
101 xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);
102
103 status = objio_alloc_lseg(&lseg, pnfslay, &lgr->range, &stream, gfp_flags);
104 if (unlikely(status)) {
105 dprintk("%s: objio_alloc_lseg Return err %d\n", __func__,
106 status);
107 goto err;
108 }
109
110 __free_page(scratch);
111
112 dprintk("%s: Return %p\n", __func__, lseg);
113 return lseg;
114
115err:
116 __free_page(scratch);
117err_nofree:
118 dprintk("%s: Err Return=>%d\n", __func__, status);
119 return ERR_PTR(status);
120}
121
122/*
123 * Free a layout segment
124 */
125void
126objlayout_free_lseg(struct pnfs_layout_segment *lseg)
127{
128 dprintk("%s: freeing layout segment %p\n", __func__, lseg);
129
130 if (unlikely(!lseg))
131 return;
132
133 objio_free_lseg(lseg);
134}
135
136/*
137 * I/O Operations
138 */
139static inline u64
140end_offset(u64 start, u64 len)
141{
142 u64 end;
143
144 end = start + len;
145 return end >= start ? end : NFS4_MAX_UINT64;
146}
147
148/* last octet in a range */
149static inline u64
150last_byte_offset(u64 start, u64 len)
151{
152 u64 end;
153
154 BUG_ON(!len);
155 end = start + len;
156 return end > start ? end - 1 : NFS4_MAX_UINT64;
157}
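/*
 * Editor's worked example: end_offset(4096, 8192) = 12288 and
 * last_byte_offset(4096, 8192) = 12287, the last octet of the range;
 * end_offset(0xfffffffffffffff0, 0x20) would wrap u64, so both helpers
 * clamp an overflowing range to NFS4_MAX_UINT64.
 */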
158
159static struct objlayout_io_state *
160objlayout_alloc_io_state(struct pnfs_layout_hdr *pnfs_layout_type,
161 struct page **pages,
162 unsigned pgbase,
163 loff_t offset,
164 size_t count,
165 struct pnfs_layout_segment *lseg,
166 void *rpcdata,
167 gfp_t gfp_flags)
168{
169 struct objlayout_io_state *state;
170 u64 lseg_end_offset;
171
172 dprintk("%s: allocating io_state\n", __func__);
173 if (objio_alloc_io_state(lseg, &state, gfp_flags))
174 return NULL;
175
176 BUG_ON(offset < lseg->pls_range.offset);
177 lseg_end_offset = end_offset(lseg->pls_range.offset,
178 lseg->pls_range.length);
179 BUG_ON(offset >= lseg_end_offset);
180 if (offset + count > lseg_end_offset) {
181 count = lseg->pls_range.length -
182 (offset - lseg->pls_range.offset);
183		dprintk("%s: truncated count %zu\n", __func__, count);
184 }
185
186 if (pgbase > PAGE_SIZE) {
187 pages += pgbase >> PAGE_SHIFT;
188 pgbase &= ~PAGE_MASK;
189 }
190
191 INIT_LIST_HEAD(&state->err_list);
192 state->lseg = lseg;
193 state->rpcdata = rpcdata;
194 state->pages = pages;
195 state->pgbase = pgbase;
196 state->nr_pages = (pgbase + count + PAGE_SIZE - 1) >> PAGE_SHIFT;
197 state->offset = offset;
198 state->count = count;
199 state->sync = 0;
200
201 return state;
202}
203
204static void
205objlayout_free_io_state(struct objlayout_io_state *state)
206{
207 dprintk("%s: freeing io_state\n", __func__);
208 if (unlikely(!state))
209 return;
210
211 objio_free_io_state(state);
212}
213
214/*
215 * I/O done common code
216 */
217static void
218objlayout_iodone(struct objlayout_io_state *state)
219{
220	dprintk("%s: state %p status %d\n", __func__, state, state->status);
221
222 if (likely(state->status >= 0)) {
223 objlayout_free_io_state(state);
224 } else {
225 struct objlayout *objlay = OBJLAYOUT(state->lseg->pls_layout);
226
227 spin_lock(&objlay->lock);
228 objlay->delta_space_valid = OBJ_DSU_INVALID;
229		list_add(&state->err_list, &objlay->err_list);
230 spin_unlock(&objlay->lock);
231 }
232}
233
234/*
235 * objlayout_io_set_result - Set an osd_error code on a specific osd comp.
236 *
237 * The @index component IO failed (error returned from target). Register
238 * the error for later reporting at layout-return.
239 */
240void
241objlayout_io_set_result(struct objlayout_io_state *state, unsigned index,
242 struct pnfs_osd_objid *pooid, int osd_error,
243 u64 offset, u64 length, bool is_write)
244{
245 struct pnfs_osd_ioerr *ioerr = &state->ioerrs[index];
246
247 BUG_ON(index >= state->num_comps);
248 if (osd_error) {
249 ioerr->oer_component = *pooid;
250 ioerr->oer_comp_offset = offset;
251 ioerr->oer_comp_length = length;
252 ioerr->oer_iswrite = is_write;
253 ioerr->oer_errno = osd_error;
254
255 dprintk("%s: err[%d]: errno=%d is_write=%d dev(%llx:%llx) "
256 "par=0x%llx obj=0x%llx offset=0x%llx length=0x%llx\n",
257 __func__, index, ioerr->oer_errno,
258 ioerr->oer_iswrite,
259 _DEVID_LO(&ioerr->oer_component.oid_device_id),
260 _DEVID_HI(&ioerr->oer_component.oid_device_id),
261 ioerr->oer_component.oid_partition_id,
262 ioerr->oer_component.oid_object_id,
263 ioerr->oer_comp_offset,
264 ioerr->oer_comp_length);
265 } else {
266 /* User need not call if no error is reported */
267 ioerr->oer_errno = 0;
268 }
269}
270
271/* Function scheduled on rpc workqueue to call ->nfs_readlist_complete().
272 * This is because the osd completion is called with interrupts disabled
273 * by the block layer.
274 */
275static void _rpc_read_complete(struct work_struct *work)
276{
277 struct rpc_task *task;
278 struct nfs_read_data *rdata;
279
280 dprintk("%s enter\n", __func__);
281 task = container_of(work, struct rpc_task, u.tk_work);
282 rdata = container_of(task, struct nfs_read_data, task);
283
284 pnfs_ld_read_done(rdata);
285}
286
287void
288objlayout_read_done(struct objlayout_io_state *state, ssize_t status, bool sync)
289{
290 int eof = state->eof;
291 struct nfs_read_data *rdata;
292
293 state->status = status;
294	dprintk("%s: Begin status=%zd eof=%d\n", __func__, status, eof);
295 rdata = state->rpcdata;
296 rdata->task.tk_status = status;
297 if (status >= 0) {
298 rdata->res.count = status;
299 rdata->res.eof = eof;
300 }
301 objlayout_iodone(state);
302 /* must not use state after this point */
303
304 if (sync)
305 pnfs_ld_read_done(rdata);
306 else {
307 INIT_WORK(&rdata->task.u.tk_work, _rpc_read_complete);
308 schedule_work(&rdata->task.u.tk_work);
309 }
310}
311
312/*
313 * Perform sync or async reads.
314 */
315enum pnfs_try_status
316objlayout_read_pagelist(struct nfs_read_data *rdata)
317{
318 loff_t offset = rdata->args.offset;
319 size_t count = rdata->args.count;
320 struct objlayout_io_state *state;
321 ssize_t status = 0;
322 loff_t eof;
323
324 dprintk("%s: Begin inode %p offset %llu count %d\n",
325 __func__, rdata->inode, offset, (int)count);
326
327 eof = i_size_read(rdata->inode);
328 if (unlikely(offset + count > eof)) {
329 if (offset >= eof) {
330 status = 0;
331 rdata->res.count = 0;
332 rdata->res.eof = 1;
333 goto out;
334 }
335 count = eof - offset;
336 }
337
338 state = objlayout_alloc_io_state(NFS_I(rdata->inode)->layout,
339 rdata->args.pages, rdata->args.pgbase,
340 offset, count,
341 rdata->lseg, rdata,
342 GFP_KERNEL);
343 if (unlikely(!state)) {
344 status = -ENOMEM;
345 goto out;
346 }
347
348 state->eof = state->offset + state->count >= eof;
349
350 status = objio_read_pagelist(state);
351 out:
352	dprintk("%s: Return status %zd\n", __func__, status);
353 rdata->pnfs_error = status;
354 return PNFS_ATTEMPTED;
355}
356
357/* Function scheduled on rpc workqueue to call ->nfs_writelist_complete().
358 * This is because the osd completion is called with interrupts disabled
359 * by the block layer.
360 */
361static void _rpc_write_complete(struct work_struct *work)
362{
363 struct rpc_task *task;
364 struct nfs_write_data *wdata;
365
366 dprintk("%s enter\n", __func__);
367 task = container_of(work, struct rpc_task, u.tk_work);
368 wdata = container_of(task, struct nfs_write_data, task);
369
370 pnfs_ld_write_done(wdata);
371}
372
373void
374objlayout_write_done(struct objlayout_io_state *state, ssize_t status,
375 bool sync)
376{
377 struct nfs_write_data *wdata;
378
379 dprintk("%s: Begin\n", __func__);
380 wdata = state->rpcdata;
381 state->status = status;
382 wdata->task.tk_status = status;
383 if (status >= 0) {
384 wdata->res.count = status;
385 wdata->verf.committed = state->committed;
386 dprintk("%s: Return status %d committed %d\n",
387 __func__, wdata->task.tk_status,
388 wdata->verf.committed);
389 } else
390 dprintk("%s: Return status %d\n",
391 __func__, wdata->task.tk_status);
392 objlayout_iodone(state);
393 /* must not use state after this point */
394
395 if (sync)
396 pnfs_ld_write_done(wdata);
397 else {
398 INIT_WORK(&wdata->task.u.tk_work, _rpc_write_complete);
399 schedule_work(&wdata->task.u.tk_work);
400 }
401}
402
403/*
404 * Perform sync or async writes.
405 */
406enum pnfs_try_status
407objlayout_write_pagelist(struct nfs_write_data *wdata,
408 int how)
409{
410 struct objlayout_io_state *state;
411 ssize_t status;
412
413 dprintk("%s: Begin inode %p offset %llu count %u\n",
414 __func__, wdata->inode, wdata->args.offset, wdata->args.count);
415
416 state = objlayout_alloc_io_state(NFS_I(wdata->inode)->layout,
417 wdata->args.pages,
418 wdata->args.pgbase,
419 wdata->args.offset,
420 wdata->args.count,
421 wdata->lseg, wdata,
422 GFP_NOFS);
423 if (unlikely(!state)) {
424 status = -ENOMEM;
425 goto out;
426 }
427
428 state->sync = how & FLUSH_SYNC;
429
430 status = objio_write_pagelist(state, how & FLUSH_STABLE);
431 out:
432	dprintk("%s: Return status %zd\n", __func__, status);
433 wdata->pnfs_error = status;
434 return PNFS_ATTEMPTED;
435}
436
437void
438objlayout_encode_layoutcommit(struct pnfs_layout_hdr *pnfslay,
439 struct xdr_stream *xdr,
440 const struct nfs4_layoutcommit_args *args)
441{
442 struct objlayout *objlay = OBJLAYOUT(pnfslay);
443 struct pnfs_osd_layoutupdate lou;
444 __be32 *start;
445
446 dprintk("%s: Begin\n", __func__);
447
448 spin_lock(&objlay->lock);
449 lou.dsu_valid = (objlay->delta_space_valid == OBJ_DSU_VALID);
450 lou.dsu_delta = objlay->delta_space_used;
451 objlay->delta_space_used = 0;
452 objlay->delta_space_valid = OBJ_DSU_INIT;
453 lou.olu_ioerr_flag = !list_empty(&objlay->err_list);
454 spin_unlock(&objlay->lock);
455
456 start = xdr_reserve_space(xdr, 4);
457
458 BUG_ON(pnfs_osd_xdr_encode_layoutupdate(xdr, &lou));
459
460 *start = cpu_to_be32((xdr->p - start - 1) * 4);
461
462 dprintk("%s: Return delta_space_used %lld err %d\n", __func__,
463 lou.dsu_delta, lou.olu_ioerr_flag);
464}
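/*
 * Editor's note on the length back-patch above: xdr_reserve_space(xdr, 4)
 * leaves a 4-byte hole for the opaque length, the layoutupdate body is
 * encoded after it, and (xdr->p - start - 1) * 4 converts the distance
 * in 4-byte XDR words (minus the hole itself) back into a byte count.
 * E.g. if the body consumed 5 words, xdr->p - start = 6 and the hole
 * is patched with 20.
 */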
465
466static int
467err_prio(u32 oer_errno)
468{
469 switch (oer_errno) {
470 case 0:
471 return 0;
472
473 case PNFS_OSD_ERR_RESOURCE:
474 return OSD_ERR_PRI_RESOURCE;
475 case PNFS_OSD_ERR_BAD_CRED:
476 return OSD_ERR_PRI_BAD_CRED;
477 case PNFS_OSD_ERR_NO_ACCESS:
478 return OSD_ERR_PRI_NO_ACCESS;
479 case PNFS_OSD_ERR_UNREACHABLE:
480 return OSD_ERR_PRI_UNREACHABLE;
481 case PNFS_OSD_ERR_NOT_FOUND:
482 return OSD_ERR_PRI_NOT_FOUND;
483 case PNFS_OSD_ERR_NO_SPACE:
484 return OSD_ERR_PRI_NO_SPACE;
485 default:
486 WARN_ON(1);
487 /* fallthrough */
488 case PNFS_OSD_ERR_EIO:
489 return OSD_ERR_PRI_EIO;
490 }
491}
492
493static void
494merge_ioerr(struct pnfs_osd_ioerr *dest_err,
495 const struct pnfs_osd_ioerr *src_err)
496{
497 u64 dest_end, src_end;
498
499 if (!dest_err->oer_errno) {
500 *dest_err = *src_err;
501 /* accumulated device must be blank */
502 memset(&dest_err->oer_component.oid_device_id, 0,
503 sizeof(dest_err->oer_component.oid_device_id));
504
505 return;
506 }
507
508 if (dest_err->oer_component.oid_partition_id !=
509 src_err->oer_component.oid_partition_id)
510 dest_err->oer_component.oid_partition_id = 0;
511
512 if (dest_err->oer_component.oid_object_id !=
513 src_err->oer_component.oid_object_id)
514 dest_err->oer_component.oid_object_id = 0;
515
516 if (dest_err->oer_comp_offset > src_err->oer_comp_offset)
517 dest_err->oer_comp_offset = src_err->oer_comp_offset;
518
519 dest_end = end_offset(dest_err->oer_comp_offset,
520 dest_err->oer_comp_length);
521 src_end = end_offset(src_err->oer_comp_offset,
522 src_err->oer_comp_length);
523 if (dest_end < src_end)
524 dest_end = src_end;
525
526 dest_err->oer_comp_length = dest_end - dest_err->oer_comp_offset;
527
528 if ((src_err->oer_iswrite == dest_err->oer_iswrite) &&
529 (err_prio(src_err->oer_errno) > err_prio(dest_err->oer_errno))) {
530 dest_err->oer_errno = src_err->oer_errno;
531 } else if (src_err->oer_iswrite) {
532 dest_err->oer_iswrite = true;
533 dest_err->oer_errno = src_err->oer_errno;
534 }
535}
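/*
 * Editor's worked example (hypothetical errors): merging a read error
 * {offset 0x1000, length 0x1000, PNFS_OSD_ERR_RESOURCE} into an
 * existing read error {offset 0x3000, length 0x1000, PNFS_OSD_ERR_EIO}
 * yields {offset 0x1000, length 0x3000}, covering both ranges and the
 * gap between them; the errno stays PNFS_OSD_ERR_EIO because
 * err_prio(RESOURCE) does not exceed err_prio(EIO), while a write
 * error would always override a read error.
 */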
536
537static void
538encode_accumulated_error(struct objlayout *objlay, __be32 *p)
539{
540 struct objlayout_io_state *state, *tmp;
541 struct pnfs_osd_ioerr accumulated_err = {.oer_errno = 0};
542
543 list_for_each_entry_safe(state, tmp, &objlay->err_list, err_list) {
544 unsigned i;
545
546 for (i = 0; i < state->num_comps; i++) {
547 struct pnfs_osd_ioerr *ioerr = &state->ioerrs[i];
548
549 if (!ioerr->oer_errno)
550 continue;
551
552 printk(KERN_ERR "%s: err[%d]: errno=%d is_write=%d "
553 "dev(%llx:%llx) par=0x%llx obj=0x%llx "
554 "offset=0x%llx length=0x%llx\n",
555 __func__, i, ioerr->oer_errno,
556 ioerr->oer_iswrite,
557 _DEVID_LO(&ioerr->oer_component.oid_device_id),
558 _DEVID_HI(&ioerr->oer_component.oid_device_id),
559 ioerr->oer_component.oid_partition_id,
560 ioerr->oer_component.oid_object_id,
561 ioerr->oer_comp_offset,
562 ioerr->oer_comp_length);
563
564 merge_ioerr(&accumulated_err, ioerr);
565 }
566 list_del(&state->err_list);
567 objlayout_free_io_state(state);
568 }
569
570 pnfs_osd_xdr_encode_ioerr(p, &accumulated_err);
571}
572
573void
574objlayout_encode_layoutreturn(struct pnfs_layout_hdr *pnfslay,
575 struct xdr_stream *xdr,
576 const struct nfs4_layoutreturn_args *args)
577{
578 struct objlayout *objlay = OBJLAYOUT(pnfslay);
579 struct objlayout_io_state *state, *tmp;
580 __be32 *start;
581
582 dprintk("%s: Begin\n", __func__);
583 start = xdr_reserve_space(xdr, 4);
584 BUG_ON(!start);
585
586 spin_lock(&objlay->lock);
587
588 list_for_each_entry_safe(state, tmp, &objlay->err_list, err_list) {
589 __be32 *last_xdr = NULL, *p;
590 unsigned i;
591 int res = 0;
592
593 for (i = 0; i < state->num_comps; i++) {
594 struct pnfs_osd_ioerr *ioerr = &state->ioerrs[i];
595
596 if (!ioerr->oer_errno)
597 continue;
598
599 dprintk("%s: err[%d]: errno=%d is_write=%d "
600 "dev(%llx:%llx) par=0x%llx obj=0x%llx "
601 "offset=0x%llx length=0x%llx\n",
602 __func__, i, ioerr->oer_errno,
603 ioerr->oer_iswrite,
604 _DEVID_LO(&ioerr->oer_component.oid_device_id),
605 _DEVID_HI(&ioerr->oer_component.oid_device_id),
606 ioerr->oer_component.oid_partition_id,
607 ioerr->oer_component.oid_object_id,
608 ioerr->oer_comp_offset,
609 ioerr->oer_comp_length);
610
611 p = pnfs_osd_xdr_ioerr_reserve_space(xdr);
612 if (unlikely(!p)) {
613 res = -E2BIG;
614 break; /* accumulated_error */
615 }
616
617 last_xdr = p;
618 pnfs_osd_xdr_encode_ioerr(p, &state->ioerrs[i]);
619 }
620
621 /* TODO: use xdr_write_pages */
622 if (unlikely(res)) {
623 /* no space for even one error descriptor */
624 BUG_ON(!last_xdr);
625
626 /* we've encountered a situation with lots and lots of
627 * errors and no space to encode them all. Use the last
628 * available slot to report the union of all the
629 * remaining errors.
630 */
631 encode_accumulated_error(objlay, last_xdr);
632 goto loop_done;
633 }
634 list_del(&state->err_list);
635 objlayout_free_io_state(state);
636 }
637loop_done:
638 spin_unlock(&objlay->lock);
639
640 *start = cpu_to_be32((xdr->p - start - 1) * 4);
641 dprintk("%s: Return\n", __func__);
642}
643
644
645/*
646 * Get Device Info API for io engines
647 */
648struct objlayout_deviceinfo {
649 struct page *page;
650 struct pnfs_osd_deviceaddr da; /* This must be last */
651};
652
653/* Initialize and call nfs_getdeviceinfo, then decode and return a
654 * "struct pnfs_osd_deviceaddr *". Eventually objlayout_put_deviceinfo()
655 * should be called.
656 */
657int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
658 struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
659 gfp_t gfp_flags)
660{
661 struct objlayout_deviceinfo *odi;
662 struct pnfs_device pd;
663	struct page *page;
665 u32 *p;
666 int err;
667
668 page = alloc_page(gfp_flags);
669 if (!page)
670 return -ENOMEM;
671
675 memcpy(&pd.dev_id, d_id, sizeof(*d_id));
676 pd.layout_type = LAYOUT_OSD2_OBJECTS;
677 pd.pages = &page;
678 pd.pgbase = 0;
679 pd.pglen = PAGE_SIZE;
680 pd.mincount = 0;
681
683 err = nfs4_proc_getdeviceinfo(NFS_SERVER(pnfslay->plh_inode), &pd);
684	dprintk("%s: nfs4_proc_getdeviceinfo returned %d\n", __func__, err);
685 if (err)
686 goto err_out;
687
688 p = page_address(page);
689 odi = kzalloc(sizeof(*odi), gfp_flags);
690 if (!odi) {
691 err = -ENOMEM;
692 goto err_out;
693 }
694 pnfs_osd_xdr_decode_deviceaddr(&odi->da, p);
695 odi->page = page;
696 *deviceaddr = &odi->da;
697 return 0;
698
699err_out:
700 __free_page(page);
701 return err;
702}
703
704void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr)
705{
706 struct objlayout_deviceinfo *odi = container_of(deviceaddr,
707 struct objlayout_deviceinfo,
708 da);
709
710 __free_page(odi->page);
711 kfree(odi);
712}
diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h
new file mode 100644
index 000000000000..a8244c8e042d
--- /dev/null
+++ b/fs/nfs/objlayout/objlayout.h
@@ -0,0 +1,187 @@
1/*
2 * Data types and function declarations for interfacing with the
3 * pNFS standard object layout driver.
4 *
5 * Copyright (C) 2007 Panasas Inc. [year of first publication]
6 * All rights reserved.
7 *
8 * Benny Halevy <bhalevy@panasas.com>
9 * Boaz Harrosh <bharrosh@panasas.com>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2
13 * See the file COPYING included with this distribution for more details.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 *
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 * 3. Neither the name of the Panasas company nor the names of its
25 * contributors may be used to endorse or promote products derived
26 * from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
29 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
30 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
31 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
33 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
34 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
35 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
36 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
37 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
38 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 */
40
41#ifndef _OBJLAYOUT_H
42#define _OBJLAYOUT_H
43
44#include <linux/nfs_fs.h>
45#include <linux/pnfs_osd_xdr.h>
46#include "../pnfs.h"
47
48/*
49 * per-inode layout
50 */
51struct objlayout {
52 struct pnfs_layout_hdr pnfs_layout;
53
54 /* for layout_commit */
55 enum osd_delta_space_valid_enum {
56 OBJ_DSU_INIT = 0,
57 OBJ_DSU_VALID,
58 OBJ_DSU_INVALID,
59 } delta_space_valid;
60 s64 delta_space_used; /* consumed by write ops */
61
62 /* for layout_return */
63 spinlock_t lock;
64 struct list_head err_list;
65};
66
67static inline struct objlayout *
68OBJLAYOUT(struct pnfs_layout_hdr *lo)
69{
70 return container_of(lo, struct objlayout, pnfs_layout);
71}
72
73/*
74 * per-I/O operation state
75 * embedded in objects provider io_state data structure
76 */
77struct objlayout_io_state {
78 struct pnfs_layout_segment *lseg;
79
80 struct page **pages;
81 unsigned pgbase;
82 unsigned nr_pages;
83 unsigned long count;
84 loff_t offset;
85 bool sync;
86
87 void *rpcdata;
88 int status; /* res */
89 int eof; /* res */
90 int committed; /* res */
91
92 /* Error reporting (layout_return) */
93 struct list_head err_list;
94 unsigned num_comps;
95 /* Pointer to array of error descriptors of size num_comps.
96 * It should contain as many entries as devices in the osd_layout
97 * that participate in the I/O. It is up to the io_engine to allocate
98 * needed space and set num_comps.
99 */
100 struct pnfs_osd_ioerr *ioerrs;
101};
102
103/*
104 * Raid engine I/O API
105 */
106extern int objio_alloc_lseg(struct pnfs_layout_segment **outp,
107 struct pnfs_layout_hdr *pnfslay,
108 struct pnfs_layout_range *range,
109 struct xdr_stream *xdr,
110 gfp_t gfp_flags);
111extern void objio_free_lseg(struct pnfs_layout_segment *lseg);
112
113extern int objio_alloc_io_state(
114 struct pnfs_layout_segment *lseg,
115 struct objlayout_io_state **outp,
116 gfp_t gfp_flags);
117extern void objio_free_io_state(struct objlayout_io_state *state);
118
119extern ssize_t objio_read_pagelist(struct objlayout_io_state *ol_state);
120extern ssize_t objio_write_pagelist(struct objlayout_io_state *ol_state,
121 bool stable);
122
123/*
124 * callback API
125 */
126extern void objlayout_io_set_result(struct objlayout_io_state *state,
127 unsigned index, struct pnfs_osd_objid *pooid,
128 int osd_error, u64 offset, u64 length, bool is_write);
129
130static inline void
131objlayout_add_delta_space_used(struct objlayout_io_state *state, s64 space_used)
132{
133 struct objlayout *objlay = OBJLAYOUT(state->lseg->pls_layout);
134
135 /* If one of the I/Os errored out and the delta_space_used was
136	 * invalid, we render the complete report as invalid. The protocol
137	 * mandates that the DSU be accurate or not reported at all.
138 */
139 spin_lock(&objlay->lock);
140 if (objlay->delta_space_valid != OBJ_DSU_INVALID) {
141 objlay->delta_space_valid = OBJ_DSU_VALID;
142 objlay->delta_space_used += space_used;
143 }
144 spin_unlock(&objlay->lock);
145}
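/*
 * Editor's sketch of the intended use (hypothetical call site, not
 * part of this patch): an io engine that learns how many bytes a write
 * consumed on the OSDs would report it from its completion path,
 *
 *	objlayout_add_delta_space_used(&ios->ol_state, bytes_used);
 *
 * so the next LAYOUTCOMMIT carries an accurate delta_space_used.
 */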
146
147extern void objlayout_read_done(struct objlayout_io_state *state,
148 ssize_t status, bool sync);
149extern void objlayout_write_done(struct objlayout_io_state *state,
150 ssize_t status, bool sync);
151
152extern int objlayout_get_deviceinfo(struct pnfs_layout_hdr *pnfslay,
153 struct nfs4_deviceid *d_id, struct pnfs_osd_deviceaddr **deviceaddr,
154 gfp_t gfp_flags);
155extern void objlayout_put_deviceinfo(struct pnfs_osd_deviceaddr *deviceaddr);
156
157/*
158 * exported generic objects function vectors
159 */
160
161extern struct pnfs_layout_hdr *objlayout_alloc_layout_hdr(struct inode *, gfp_t gfp_flags);
162extern void objlayout_free_layout_hdr(struct pnfs_layout_hdr *);
163
164extern struct pnfs_layout_segment *objlayout_alloc_lseg(
165 struct pnfs_layout_hdr *,
166 struct nfs4_layoutget_res *,
167 gfp_t gfp_flags);
168extern void objlayout_free_lseg(struct pnfs_layout_segment *);
169
170extern enum pnfs_try_status objlayout_read_pagelist(
171 struct nfs_read_data *);
172
173extern enum pnfs_try_status objlayout_write_pagelist(
174 struct nfs_write_data *,
175 int how);
176
177extern void objlayout_encode_layoutcommit(
178 struct pnfs_layout_hdr *,
179 struct xdr_stream *,
180 const struct nfs4_layoutcommit_args *);
181
182extern void objlayout_encode_layoutreturn(
183 struct pnfs_layout_hdr *,
184 struct xdr_stream *,
185 const struct nfs4_layoutreturn_args *);
186
187#endif /* _OBJLAYOUT_H */
diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
new file mode 100644
index 000000000000..16fc758e9123
--- /dev/null
+++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
@@ -0,0 +1,412 @@
1/*
2 * Object-Based pNFS Layout XDR layer
3 *
4 * Copyright (C) 2007 Panasas Inc. [year of first publication]
5 * All rights reserved.
6 *
7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * See the file COPYING included with this distribution for more details.
13 *
14 * Redistribution and use in source and binary forms, with or without
15 * modification, are permitted provided that the following conditions
16 * are met:
17 *
18 * 1. Redistributions of source code must retain the above copyright
19 * notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 * notice, this list of conditions and the following disclaimer in the
22 * documentation and/or other materials provided with the distribution.
23 * 3. Neither the name of the Panasas company nor the names of its
24 * contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
28 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
29 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
30 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
31 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
34 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
35 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
36 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
37 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#include <linux/pnfs_osd_xdr.h>
41
42#define NFSDBG_FACILITY NFSDBG_PNFS_LD
43
44/*
45 * The following implementation is based on RFC 5664
46 */
47
48/*
49 * struct pnfs_osd_objid {
50 * struct nfs4_deviceid oid_device_id;
51 * u64 oid_partition_id;
52 * u64 oid_object_id;
53 * }; // xdr size 32 bytes
54 */
55static __be32 *
56_osd_xdr_decode_objid(__be32 *p, struct pnfs_osd_objid *objid)
57{
58 p = xdr_decode_opaque_fixed(p, objid->oid_device_id.data,
59 sizeof(objid->oid_device_id.data));
60
61 p = xdr_decode_hyper(p, &objid->oid_partition_id);
62 p = xdr_decode_hyper(p, &objid->oid_object_id);
63 return p;
64}
65/*
66 * struct pnfs_osd_opaque_cred {
67 * u32 cred_len;
68 * void *cred;
69 * }; // xdr size [variable]
70 * NOTE: the returned cred pointer points into the xdr buffer
71 */
72static int
73_osd_xdr_decode_opaque_cred(struct pnfs_osd_opaque_cred *opaque_cred,
74 struct xdr_stream *xdr)
75{
76 __be32 *p = xdr_inline_decode(xdr, 4); /* cred_len is a u32 */
77
78 if (!p)
79 return -EINVAL;
80
81 opaque_cred->cred_len = be32_to_cpup(p);
82
83 p = xdr_inline_decode(xdr, opaque_cred->cred_len);
84 if (!p)
85 return -EINVAL;
86
87 opaque_cred->cred = p;
88 return 0;
89}
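/*
 * Sketch: because ->cred points into the rpc receive buffer, a caller
 * that needs the credential past the buffer's lifetime must copy it
 * out, e.g.:
 *
 *	void *copy = kmemdup(opaque_cred->cred, opaque_cred->cred_len,
 *			     GFP_KERNEL);
 */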
90
91/*
92 * struct pnfs_osd_object_cred {
93 * struct pnfs_osd_objid oc_object_id;
94 * u32 oc_osd_version;
95 * u32 oc_cap_key_sec;
96 * struct pnfs_osd_opaque_cred oc_cap_key;
97 * struct pnfs_osd_opaque_cred oc_cap;
98 * }; // xdr size 32 + 4 + 4 + [variable] + [variable]
99 */
100static int
101_osd_xdr_decode_object_cred(struct pnfs_osd_object_cred *comp,
102 struct xdr_stream *xdr)
103{
104 __be32 *p = xdr_inline_decode(xdr, 32 + 4 + 4);
105 int ret;
106
107 if (!p)
108 return -EIO;
109
110 p = _osd_xdr_decode_objid(p, &comp->oc_object_id);
111 comp->oc_osd_version = be32_to_cpup(p++);
112 comp->oc_cap_key_sec = be32_to_cpup(p);
113
114 ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap_key, xdr);
115 if (unlikely(ret))
116 return ret;
117
118 ret = _osd_xdr_decode_opaque_cred(&comp->oc_cap, xdr);
119 return ret;
120}
121
122/*
123 * struct pnfs_osd_data_map {
124 * u32 odm_num_comps;
125 * u64 odm_stripe_unit;
126 * u32 odm_group_width;
127 * u32 odm_group_depth;
128 * u32 odm_mirror_cnt;
129 * u32 odm_raid_algorithm;
130 * }; // xdr size 4 + 8 + 4 + 4 + 4 + 4
131 */
132static inline int
133_osd_data_map_xdr_sz(void)
134{
135 return 4 + 8 + 4 + 4 + 4 + 4;
136}
137
138static __be32 *
139_osd_xdr_decode_data_map(__be32 *p, struct pnfs_osd_data_map *data_map)
140{
141 data_map->odm_num_comps = be32_to_cpup(p++);
142 p = xdr_decode_hyper(p, &data_map->odm_stripe_unit);
143 data_map->odm_group_width = be32_to_cpup(p++);
144 data_map->odm_group_depth = be32_to_cpup(p++);
145 data_map->odm_mirror_cnt = be32_to_cpup(p++);
146 data_map->odm_raid_algorithm = be32_to_cpup(p++);
147 dprintk("%s: odm_num_comps=%u odm_stripe_unit=%llu odm_group_width=%u "
148 "odm_group_depth=%u odm_mirror_cnt=%u odm_raid_algorithm=%u\n",
149 __func__,
150 data_map->odm_num_comps,
151 (unsigned long long)data_map->odm_stripe_unit,
152 data_map->odm_group_width,
153 data_map->odm_group_depth,
154 data_map->odm_mirror_cnt,
155 data_map->odm_raid_algorithm);
156 return p;
157}
158
159int pnfs_osd_xdr_decode_layout_map(struct pnfs_osd_layout *layout,
160 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr)
161{
162 __be32 *p;
163
164 memset(iter, 0, sizeof(*iter));
165
166 p = xdr_inline_decode(xdr, _osd_data_map_xdr_sz() + 4 + 4);
167 if (unlikely(!p))
168 return -EINVAL;
169
170 p = _osd_xdr_decode_data_map(p, &layout->olo_map);
171 layout->olo_comps_index = be32_to_cpup(p++);
172 layout->olo_num_comps = be32_to_cpup(p++);
173 iter->total_comps = layout->olo_num_comps;
174 return 0;
175}
176
177bool pnfs_osd_xdr_decode_layout_comp(struct pnfs_osd_object_cred *comp,
178 struct pnfs_osd_xdr_decode_layout_iter *iter, struct xdr_stream *xdr,
179 int *err)
180{
181 BUG_ON(iter->decoded_comps > iter->total_comps);
182 if (iter->decoded_comps == iter->total_comps)
183 return false;
184
185 *err = _osd_xdr_decode_object_cred(comp, xdr);
186 if (unlikely(*err)) {
187 dprintk("%s: _osd_xdr_decode_object_cred=>%d decoded_comps=%d "
188 "total_comps=%d\n", __func__, *err,
189 iter->decoded_comps, iter->total_comps);
190 return false; /* stop the loop */
191 }
192 dprintk("%s: dev(%llx:%llx) par=0x%llx obj=0x%llx "
193 "key_len=%u cap_len=%u\n",
194 __func__,
195 _DEVID_LO(&comp->oc_object_id.oid_device_id),
196 _DEVID_HI(&comp->oc_object_id.oid_device_id),
197 comp->oc_object_id.oid_partition_id,
198 comp->oc_object_id.oid_object_id,
199 comp->oc_cap_key.cred_len, comp->oc_cap.cred_len);
200
201 iter->decoded_comps++;
202 return true;
203}
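/*
 * Sketch of the intended two-phase decode loop on the caller side
 * (illustrative; layout, iter, comp and err assumed declared):
 *
 *	err = pnfs_osd_xdr_decode_layout_map(&layout, &iter, xdr);
 *	if (unlikely(err))
 *		return err;
 *	while (pnfs_osd_xdr_decode_layout_comp(&comp, &iter, xdr, &err)) {
 *		... consume comp, e.g. copy its credentials ...
 *	}
 *	return err;
 */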
204
205/*
206 * Get Device Information Decoding
207 *
208 * Note: since Get Device Information is currently done synchronously, all
209 * variable-length string fields are left inside the rpc buffer and are
210 * only pointed to by the pnfs_osd_deviceaddr members. Hence the receive
211 * buffer must not be freed while the returned information is in use.
212 */
213/*
214 * struct nfs4_string {
215 * unsigned int len;
216 * char *data;
217 * }; // size [variable]
218 * NOTE: the returned string points into the XDR buffer
219 */
220static __be32 *
221__read_u8_opaque(__be32 *p, struct nfs4_string *str)
222{
223 str->len = be32_to_cpup(p++);
224 str->data = (char *)p;
225
226 p += XDR_QUADLEN(str->len);
227 return p;
228}
229
230/*
231 * struct pnfs_osd_targetid {
232 * u32 oti_type;
233 * struct nfs4_string oti_scsi_device_id;
234 * };// size 4 + [variable]
235 */
236static __be32 *
237__read_targetid(__be32 *p, struct pnfs_osd_targetid *targetid)
238{
239 u32 oti_type;
240
241 oti_type = be32_to_cpup(p++);
242 targetid->oti_type = oti_type;
243
244 switch (oti_type) {
245 case OBJ_TARGET_SCSI_NAME:
246 case OBJ_TARGET_SCSI_DEVICE_ID:
247 p = __read_u8_opaque(p, &targetid->oti_scsi_device_id);
248 }
249
250 return p;
251}
252
253/*
254 * struct pnfs_osd_net_addr {
255 * struct nfs4_string r_netid;
256 * struct nfs4_string r_addr;
257 * };
258 */
259static __be32 *
260__read_net_addr(__be32 *p, struct pnfs_osd_net_addr *netaddr)
261{
262 p = __read_u8_opaque(p, &netaddr->r_netid);
263 p = __read_u8_opaque(p, &netaddr->r_addr);
264
265 return p;
266}
267
268/*
269 * struct pnfs_osd_targetaddr {
270 * u32 ota_available;
271 * struct pnfs_osd_net_addr ota_netaddr;
272 * };
273 */
274static __be32 *
275__read_targetaddr(__be32 *p, struct pnfs_osd_targetaddr *targetaddr)
276{
277 u32 ota_available;
278
279 ota_available = be32_to_cpup(p++);
280 targetaddr->ota_available = ota_available;
281
282 if (ota_available)
283 p = __read_net_addr(p, &targetaddr->ota_netaddr);
284
286 return p;
287}
288
289/*
290 * struct pnfs_osd_deviceaddr {
291 * struct pnfs_osd_targetid oda_targetid;
292 * struct pnfs_osd_targetaddr oda_targetaddr;
293 * u8 oda_lun[8];
294 * struct nfs4_string oda_systemid;
295 * struct pnfs_osd_object_cred oda_root_obj_cred;
296 * struct nfs4_string oda_osdname;
297 * };
298 */
299
300/* We need this raw-pointer version for pnfs_osd_xdr_decode_deviceaddr,
301 * which does not have an xdr_stream to decode from.
302 */
303static __be32 *
304__read_opaque_cred(__be32 *p,
305 struct pnfs_osd_opaque_cred *opaque_cred)
306{
307 opaque_cred->cred_len = be32_to_cpup(p++);
308 opaque_cred->cred = p;
309 return p + XDR_QUADLEN(opaque_cred->cred_len);
310}
311
312static __be32 *
313__read_object_cred(__be32 *p, struct pnfs_osd_object_cred *comp)
314{
315 p = _osd_xdr_decode_objid(p, &comp->oc_object_id);
316 comp->oc_osd_version = be32_to_cpup(p++);
317 comp->oc_cap_key_sec = be32_to_cpup(p++);
318
319 p = __read_opaque_cred(p, &comp->oc_cap_key);
320 p = __read_opaque_cred(p, &comp->oc_cap);
321 return p;
322}
323
324void pnfs_osd_xdr_decode_deviceaddr(
325 struct pnfs_osd_deviceaddr *deviceaddr, __be32 *p)
326{
327 p = __read_targetid(p, &deviceaddr->oda_targetid);
328
329 p = __read_targetaddr(p, &deviceaddr->oda_targetaddr);
330
331 p = xdr_decode_opaque_fixed(p, deviceaddr->oda_lun,
332 sizeof(deviceaddr->oda_lun));
333
334 p = __read_u8_opaque(p, &deviceaddr->oda_systemid);
335
336 p = __read_object_cred(p, &deviceaddr->oda_root_obj_cred);
337
338 p = __read_u8_opaque(p, &deviceaddr->oda_osdname);
339
340 /* libosd likes this NULL-terminated for debug prints; it's the last field, so terminating in place is safe */
341 deviceaddr->oda_osdname.data[deviceaddr->oda_osdname.len] = 0;
342}
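/*
 * Illustrative call site: "p" points at the osd_deviceaddr body of a
 * GETDEVICEINFO reply, and that buffer must stay allocated for as long
 * as "da" (a hypothetical local) is referenced; see the note above:
 *
 *	struct pnfs_osd_deviceaddr da;
 *
 *	pnfs_osd_xdr_decode_deviceaddr(&da, p);
 *	... use da.oda_systemid, da.oda_root_obj_cred, da.oda_osdname ...
 */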
343
344/*
345 * struct pnfs_osd_layoutupdate {
346 * u32 dsu_valid;
347 * s64 dsu_delta;
348 * u32 olu_ioerr_flag;
349 * }; // xdr size 4 + (dsu_valid ? 8 : 0) + 4
350 */
351int
352pnfs_osd_xdr_encode_layoutupdate(struct xdr_stream *xdr,
353 struct pnfs_osd_layoutupdate *lou)
354{
355 __be32 *p = xdr_reserve_space(xdr, 4 + (lou->dsu_valid ? 8 : 0) + 4);
356
357 if (!p)
358 return -E2BIG;
359
360 *p++ = cpu_to_be32(lou->dsu_valid);
361 if (lou->dsu_valid)
362 p = xdr_encode_hyper(p, lou->dsu_delta);
363 *p++ = cpu_to_be32(lou->olu_ioerr_flag);
364 return 0;
365}
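/*
 * Sketch: encoding a layoutcommit update that reports 4K of newly used
 * space and no I/O errors (values illustrative):
 *
 *	struct pnfs_osd_layoutupdate lou = {
 *		.dsu_valid	= 1,
 *		.dsu_delta	= 4096,
 *		.olu_ioerr_flag	= 0,
 *	};
 *	int err = pnfs_osd_xdr_encode_layoutupdate(xdr, &lou);
 */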
366
367/*
368 * struct pnfs_osd_objid {
369 * struct nfs4_deviceid oid_device_id;
370 * u64 oid_partition_id;
371 * u64 oid_object_id;
372 * }; // xdr size 32 bytes
373 */
374static inline __be32 *
375pnfs_osd_xdr_encode_objid(__be32 *p, struct pnfs_osd_objid *object_id)
376{
377 p = xdr_encode_opaque_fixed(p, &object_id->oid_device_id.data,
378 sizeof(object_id->oid_device_id.data));
379 p = xdr_encode_hyper(p, object_id->oid_partition_id);
380 p = xdr_encode_hyper(p, object_id->oid_object_id);
381
382 return p;
383}
384
385/*
386 * struct pnfs_osd_ioerr {
387 * struct pnfs_osd_objid oer_component;
388 * u64 oer_comp_offset;
389 * u64 oer_comp_length;
390 * u32 oer_iswrite;
391 * u32 oer_errno;
392 * }; // xdr size 32 + 24 bytes
393 */
394void pnfs_osd_xdr_encode_ioerr(__be32 *p, struct pnfs_osd_ioerr *ioerr)
395{
396 p = pnfs_osd_xdr_encode_objid(p, &ioerr->oer_component);
397 p = xdr_encode_hyper(p, ioerr->oer_comp_offset);
398 p = xdr_encode_hyper(p, ioerr->oer_comp_length);
399 *p++ = cpu_to_be32(ioerr->oer_iswrite);
400 *p = cpu_to_be32(ioerr->oer_errno);
401}
402
403__be32 *pnfs_osd_xdr_ioerr_reserve_space(struct xdr_stream *xdr)
404{
405 __be32 *p;
406
407 p = xdr_reserve_space(xdr, 32 + 24);
408 if (unlikely(!p))
409 dprintk("%s: out of xdr space\n", __func__);
410
411 return p;
412}
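/*
 * Sketch: the reserve/encode pair as a layoutreturn encoder might use
 * it, one error descriptor at a time (illustrative):
 *
 *	__be32 *p = pnfs_osd_xdr_ioerr_reserve_space(xdr);
 *
 *	if (unlikely(!p))
 *		return -E2BIG;
 *	pnfs_osd_xdr_encode_ioerr(p, ioerr);
 */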