Diffstat (limited to 'fs/exofs/ios.c')
 -rw-r--r--  fs/exofs/ios.c | 575
 1 file changed, 488 insertions, 87 deletions
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index 5bad01fa1f9f..5293bc411d17 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -23,9 +23,13 @@
  */
 
 #include <scsi/scsi_device.h>
+#include <asm/div64.h>
 
 #include "exofs.h"
 
+#define EXOFS_DBGMSG2(M...) do {} while (0)
+/* #define EXOFS_DBGMSG2 EXOFS_DBGMSG */
+
 void exofs_make_credential(u8 cred_a[OSD_CAP_LEN], const struct osd_obj_id *obj)
 {
         osd_sec_init_nosec_doall_caps(cred_a, obj, false, true);
@@ -64,21 +68,24 @@ out:
         return ret;
 }
 
-int exofs_get_io_state(struct exofs_sb_info *sbi, struct exofs_io_state** pios)
+int exofs_get_io_state(struct exofs_layout *layout,
+                       struct exofs_io_state **pios)
 {
         struct exofs_io_state *ios;
 
         /*TODO: Maybe use kmem_cach per sbi of size
-         * exofs_io_state_size(sbi->s_numdevs)
+         * exofs_io_state_size(layout->s_numdevs)
          */
-        ios = kzalloc(exofs_io_state_size(sbi->s_numdevs), GFP_KERNEL);
+        ios = kzalloc(exofs_io_state_size(layout->s_numdevs), GFP_KERNEL);
         if (unlikely(!ios)) {
+                EXOFS_DBGMSG("Faild kzalloc bytes=%d\n",
+                             exofs_io_state_size(layout->s_numdevs));
                 *pios = NULL;
                 return -ENOMEM;
         }
 
-        ios->sbi = sbi;
-        ios->obj.partition = sbi->s_pid;
+        ios->layout = layout;
+        ios->obj.partition = layout->s_pid;
         *pios = ios;
         return 0;
 }
@@ -101,6 +108,29 @@ void exofs_put_io_state(struct exofs_io_state *ios)
         }
 }
 
+unsigned exofs_layout_od_id(struct exofs_layout *layout,
+                            osd_id obj_no, unsigned layout_index)
+{
+/*      switch (layout->lay_func) {
+        case LAYOUT_MOVING_WINDOW:
+        {*/
+                unsigned dev_mod = obj_no;
+
+                return (layout_index + dev_mod * layout->mirrors_p1) %
+                                                        layout->s_numdevs;
+/*      }
+        case LAYOUT_FUNC_IMPLICT:
+                return layout->devs[layout_index];
+        }*/
+}
+
+static inline struct osd_dev *exofs_ios_od(struct exofs_io_state *ios,
+                                           unsigned layout_index)
+{
+        return ios->layout->s_ods[
+                exofs_layout_od_id(ios->layout, ios->obj.id, layout_index)];
+}
+
 static void _sync_done(struct exofs_io_state *ios, void *p)
 {
         struct completion *waiting = p;
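The exofs_layout_od_id() helper added above spreads each object's component list across the device table by rotating the starting slot with the object id: component layout_index of object obj_no maps to (layout_index + obj_no * mirrors_p1) % s_numdevs. As a quick illustration, here is a stand-alone user-space sketch of that arithmetic; the toy_layout type, its field values, and the toy_od_id() name are invented for the example and are not part of the patch:

#include <stdio.h>

/* Toy stand-in for the two fields exofs_layout_od_id() reads. */
struct toy_layout {
        unsigned mirrors_p1;    /* data copy + mirrors per stripe component */
        unsigned s_numdevs;     /* total device-table slots */
};

/* Same arithmetic as the patch: rotate the component index by the
 * object number (scaled by the mirror-group size), modulo the table. */
static unsigned toy_od_id(const struct toy_layout *l,
                          unsigned long long obj_no, unsigned layout_index)
{
        unsigned dev_mod = (unsigned)obj_no;

        return (layout_index + dev_mod * l->mirrors_p1) % l->s_numdevs;
}

int main(void)
{
        struct toy_layout l = { .mirrors_p1 = 2, .s_numdevs = 6 };
        unsigned comp;

        /* Object 5 starts its rotation at (5 * 2) % 6 = 4. */
        for (comp = 0; comp < l.s_numdevs; comp++)
                printf("obj=5 comp=%u -> device slot %u\n",
                       comp, toy_od_id(&l, 5, comp));
        return 0;
}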
@@ -168,6 +198,21 @@ static int exofs_io_execute(struct exofs_io_state *ios)
         return ret;
 }
 
+static void _clear_bio(struct bio *bio)
+{
+        struct bio_vec *bv;
+        unsigned i;
+
+        __bio_for_each_segment(bv, bio, i, 0) {
+                unsigned this_count = bv->bv_len;
+
+                if (likely(PAGE_SIZE == this_count))
+                        clear_highpage(bv->bv_page);
+                else
+                        zero_user(bv->bv_page, bv->bv_offset, this_count);
+        }
+}
+
 int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
 {
         enum osd_err_priority acumulated_osd_err = 0;
@@ -176,16 +221,25 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
 
         for (i = 0; i < ios->numdevs; i++) {
                 struct osd_sense_info osi;
-                int ret = osd_req_decode_sense(ios->per_dev[i].or, &osi);
+                struct osd_request *or = ios->per_dev[i].or;
+                int ret;
+
+                if (unlikely(!or))
+                        continue;
 
+                ret = osd_req_decode_sense(or, &osi);
                 if (likely(!ret))
                         continue;
 
-                if (unlikely(ret == -EFAULT)) {
-                        EXOFS_DBGMSG("%s: EFAULT Need page clear\n", __func__);
-                        /*FIXME: All the pages in this device range should:
-                         *      clear_highpage(page);
-                         */
+                if (OSD_ERR_PRI_CLEAR_PAGES == osi.osd_err_pri) {
+                        /* start read offset passed endof file */
+                        _clear_bio(ios->per_dev[i].bio);
+                        EXOFS_DBGMSG("start read offset passed end of file "
+                                "offset=0x%llx, length=0x%llx\n",
+                                _LLU(ios->per_dev[i].offset),
+                                _LLU(ios->per_dev[i].length));
+
+                        continue; /* we recovered */
                 }
 
                 if (osi.osd_err_pri >= acumulated_osd_err) {
@@ -205,14 +259,259 @@ int exofs_check_io(struct exofs_io_state *ios, u64 *resid)
         return acumulated_lin_err;
 }
 
+/*
+ * L - logical offset into the file
+ *
+ * U - The number of bytes in a stripe within a group
+ *
+ *      U = stripe_unit * group_width
+ *
+ * T - The number of bytes striped within a group of component objects
+ *     (before advancing to the next group)
+ *
+ *      T = stripe_unit * group_width * group_depth
+ *
+ * S - The number of bytes striped across all component objects
+ *     before the pattern repeats
+ *
+ *      S = stripe_unit * group_width * group_depth * group_count
+ *
+ * M - The "major" (i.e., across all components) stripe number
+ *
+ *      M = L / S
+ *
+ * G - Counts the groups from the beginning of the major stripe
+ *
+ *      G = (L - (M * S)) / T [or (L % S) / T]
+ *
+ * H - The byte offset within the group
+ *
+ *      H = (L - (M * S)) % T [or (L % S) % T]
+ *
+ * N - The "minor" (i.e., across the group) stripe number
+ *
+ *      N = H / U
+ *
+ * C - The component index coresponding to L
+ *
+ *      C = (H - (N * U)) / stripe_unit + G * group_width
+ *      [or (L % U) / stripe_unit + G * group_width]
+ *
+ * O - The component offset coresponding to L
+ *
+ *      O = L % stripe_unit + N * stripe_unit + M * group_depth * stripe_unit
+ */
+struct _striping_info {
+        u64 obj_offset;
+        u64 group_length;
+        u64 total_group_length;
+        u64 Major;
+        unsigned dev;
+        unsigned unit_off;
+};
+
+static void _calc_stripe_info(struct exofs_io_state *ios, u64 file_offset,
+                              struct _striping_info *si)
+{
+        u32 stripe_unit = ios->layout->stripe_unit;
+        u32 group_width = ios->layout->group_width;
+        u64 group_depth = ios->layout->group_depth;
+
+        u32 U = stripe_unit * group_width;
+        u64 T = U * group_depth;
+        u64 S = T * ios->layout->group_count;
+        u64 M = div64_u64(file_offset, S);
+
+        /*
+        G = (L - (M * S)) / T
+        H = (L - (M * S)) % T
+        */
+        u64 LmodS = file_offset - M * S;
+        u32 G = div64_u64(LmodS, T);
+        u64 H = LmodS - G * T;
+
+        u32 N = div_u64(H, U);
+
+        /* "H - (N * U)" is just "H % U" so it's bound to u32 */
+        si->dev = (u32)(H - (N * U)) / stripe_unit + G * group_width;
+        si->dev *= ios->layout->mirrors_p1;
+
+        div_u64_rem(file_offset, stripe_unit, &si->unit_off);
+
+        si->obj_offset = si->unit_off + (N * stripe_unit) +
+                         (M * group_depth * stripe_unit);
+
+        si->group_length = T - H;
+        si->total_group_length = T;
+        si->Major = M;
+}
+
+static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
+                unsigned pgbase, struct exofs_per_dev_state *per_dev,
+                int cur_len)
+{
+        unsigned pg = *cur_pg;
+        struct request_queue *q =
+                        osd_request_queue(exofs_ios_od(ios, per_dev->dev));
+
+        per_dev->length += cur_len;
+
+        if (per_dev->bio == NULL) {
+                unsigned pages_in_stripe = ios->layout->group_width *
+                                (ios->layout->stripe_unit / PAGE_SIZE);
+                unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
+                                ios->layout->group_width;
+
+                per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
+                if (unlikely(!per_dev->bio)) {
+                        EXOFS_DBGMSG("Faild to allocate BIO size=%u\n",
+                                     bio_size);
+                        return -ENOMEM;
+                }
+        }
+
+        while (cur_len > 0) {
+                unsigned pglen = min_t(unsigned, PAGE_SIZE - pgbase, cur_len);
+                unsigned added_len;
+
+                BUG_ON(ios->nr_pages <= pg);
+                cur_len -= pglen;
+
+                added_len = bio_add_pc_page(q, per_dev->bio, ios->pages[pg],
+                                            pglen, pgbase);
+                if (unlikely(pglen != added_len))
+                        return -ENOMEM;
+                pgbase = 0;
+                ++pg;
+        }
+        BUG_ON(cur_len);
+
+        *cur_pg = pg;
+        return 0;
+}
+
+static int _prepare_one_group(struct exofs_io_state *ios, u64 length,
+                              struct _striping_info *si, unsigned first_comp)
+{
+        unsigned stripe_unit = ios->layout->stripe_unit;
+        unsigned mirrors_p1 = ios->layout->mirrors_p1;
+        unsigned devs_in_group = ios->layout->group_width * mirrors_p1;
+        unsigned dev = si->dev;
+        unsigned first_dev = dev - (dev % devs_in_group);
+        unsigned comp = first_comp + (dev - first_dev);
+        unsigned max_comp = ios->numdevs ? ios->numdevs - mirrors_p1 : 0;
+        unsigned cur_pg = ios->pages_consumed;
+        int ret = 0;
+
+        while (length) {
+                struct exofs_per_dev_state *per_dev = &ios->per_dev[comp];
+                unsigned cur_len, page_off = 0;
+
+                if (!per_dev->length) {
+                        per_dev->dev = dev;
+                        if (dev < si->dev) {
+                                per_dev->offset = si->obj_offset + stripe_unit -
+                                                  si->unit_off;
+                                cur_len = stripe_unit;
+                        } else if (dev == si->dev) {
+                                per_dev->offset = si->obj_offset;
+                                cur_len = stripe_unit - si->unit_off;
+                                page_off = si->unit_off & ~PAGE_MASK;
+                                BUG_ON(page_off && (page_off != ios->pgbase));
+                        } else { /* dev > si->dev */
+                                per_dev->offset = si->obj_offset - si->unit_off;
+                                cur_len = stripe_unit;
+                        }
+
+                        if (max_comp < comp)
+                                max_comp = comp;
+
+                        dev += mirrors_p1;
+                        dev = (dev % devs_in_group) + first_dev;
+                } else {
+                        cur_len = stripe_unit;
+                }
+                if (cur_len >= length)
+                        cur_len = length;
+
+                ret = _add_stripe_unit(ios, &cur_pg, page_off , per_dev,
+                                       cur_len);
+                if (unlikely(ret))
+                        goto out;
+
+                comp += mirrors_p1;
+                comp = (comp % devs_in_group) + first_comp;
+
+                length -= cur_len;
+        }
+out:
+        ios->numdevs = max_comp + mirrors_p1;
+        ios->pages_consumed = cur_pg;
+        return ret;
+}
+
+static int _prepare_for_striping(struct exofs_io_state *ios)
+{
+        u64 length = ios->length;
+        struct _striping_info si;
+        unsigned devs_in_group = ios->layout->group_width *
+                                 ios->layout->mirrors_p1;
+        unsigned first_comp = 0;
+        int ret = 0;
+
+        _calc_stripe_info(ios, ios->offset, &si);
+
+        if (!ios->pages) {
+                if (ios->kern_buff) {
+                        struct exofs_per_dev_state *per_dev = &ios->per_dev[0];
+
+                        per_dev->offset = si.obj_offset;
+                        per_dev->dev = si.dev;
+
+                        /* no cross device without page array */
+                        BUG_ON((ios->layout->group_width > 1) &&
+                               (si.unit_off + ios->length >
+                                ios->layout->stripe_unit));
+                }
+                ios->numdevs = ios->layout->mirrors_p1;
+                return 0;
+        }
+
+        while (length) {
+                if (length < si.group_length)
+                        si.group_length = length;
+
+                ret = _prepare_one_group(ios, si.group_length, &si, first_comp);
+                if (unlikely(ret))
+                        goto out;
+
+                length -= si.group_length;
+
+                si.group_length = si.total_group_length;
+                si.unit_off = 0;
+                ++si.Major;
+                si.obj_offset = si.Major * ios->layout->stripe_unit *
+                                ios->layout->group_depth;
+
+                si.dev = (si.dev - (si.dev % devs_in_group)) + devs_in_group;
+                si.dev %= ios->layout->s_numdevs;
+
+                first_comp += devs_in_group;
+                first_comp %= ios->layout->s_numdevs;
+        }
+
+out:
+        return ret;
+}
+
 int exofs_sbi_create(struct exofs_io_state *ios)
 {
         int i, ret;
 
-        for (i = 0; i < ios->sbi->s_numdevs; i++) {
+        for (i = 0; i < ios->layout->s_numdevs; i++) {
                 struct osd_request *or;
 
-                or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL);
+                or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
                 if (unlikely(!or)) {
                         EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                         ret = -ENOMEM;
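The block comment near the top of this hunk defines the striping quantities (U, T, S, M, G, H, N, C, O) that _calc_stripe_info() computes with the div64 helpers. As a sanity check, the same formulas can be evaluated in plain user-space C; the layout numbers and the chosen offset below are made up for illustration, and the program is only a sketch of the math, not the kernel function:

#include <stdio.h>

/* Worked example of the striping formulas above, with invented layout
 * values: stripe_unit=64K, group_width=4, group_depth=2, group_count=3. */
int main(void)
{
        unsigned long long stripe_unit = 64 * 1024;
        unsigned long long group_width = 4;
        unsigned long long group_depth = 2;
        unsigned long long group_count = 3;

        unsigned long long U = stripe_unit * group_width;  /* bytes per stripe in a group */
        unsigned long long T = U * group_depth;            /* bytes per group */
        unsigned long long S = T * group_count;            /* bytes before the pattern repeats */

        unsigned long long L = 3 * 1024 * 1024 + 4096;     /* some logical file offset */

        unsigned long long M = L / S;                      /* major stripe number */
        unsigned long long G = (L % S) / T;                /* group within the major stripe */
        unsigned long long H = (L % S) % T;                /* byte offset within the group */
        unsigned long long N = H / U;                      /* minor stripe number */
        unsigned long long C = (H % U) / stripe_unit + G * group_width; /* component index */
        unsigned long long O = L % stripe_unit + N * stripe_unit +
                               M * group_depth * stripe_unit; /* offset inside component C */

        printf("L=%llu -> M=%llu G=%llu H=%llu N=%llu C=%llu O=%llu\n",
               L, M, G, H, N, C, O);
        return 0;
}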
@@ -233,10 +532,10 @@ int exofs_sbi_remove(struct exofs_io_state *ios)
 {
         int i, ret;
 
-        for (i = 0; i < ios->sbi->s_numdevs; i++) {
+        for (i = 0; i < ios->layout->s_numdevs; i++) {
                 struct osd_request *or;
 
-                or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL);
+                or = osd_start_request(exofs_ios_od(ios, i), GFP_KERNEL);
                 if (unlikely(!or)) {
                         EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                         ret = -ENOMEM;
@@ -253,51 +552,74 @@ out:
         return ret;
 }
 
-int exofs_sbi_write(struct exofs_io_state *ios)
+static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
 {
-        int i, ret;
+        struct exofs_per_dev_state *master_dev = &ios->per_dev[cur_comp];
+        unsigned dev = ios->per_dev[cur_comp].dev;
+        unsigned last_comp = cur_comp + ios->layout->mirrors_p1;
+        int ret = 0;
 
-        for (i = 0; i < ios->sbi->s_numdevs; i++) {
+        if (ios->pages && !master_dev->length)
+                return 0; /* Just an empty slot */
+
+        for (; cur_comp < last_comp; ++cur_comp, ++dev) {
+                struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
                 struct osd_request *or;
 
-                or = osd_start_request(ios->sbi->s_ods[i], GFP_KERNEL);
+                or = osd_start_request(exofs_ios_od(ios, dev), GFP_KERNEL);
                 if (unlikely(!or)) {
                         EXOFS_ERR("%s: osd_start_request failed\n", __func__);
                         ret = -ENOMEM;
                         goto out;
                 }
-                ios->per_dev[i].or = or;
-                ios->numdevs++;
+                per_dev->or = or;
+                per_dev->offset = master_dev->offset;
 
-                if (ios->bio) {
+                if (ios->pages) {
                         struct bio *bio;
 
-                        if (i != 0) {
+                        if (per_dev != master_dev) {
                                 bio = bio_kmalloc(GFP_KERNEL,
-                                                  ios->bio->bi_max_vecs);
+                                                  master_dev->bio->bi_max_vecs);
                                 if (unlikely(!bio)) {
+                                        EXOFS_DBGMSG(
+                                              "Faild to allocate BIO size=%u\n",
+                                              master_dev->bio->bi_max_vecs);
                                         ret = -ENOMEM;
                                         goto out;
                                 }
 
-                                __bio_clone(bio, ios->bio);
+                                __bio_clone(bio, master_dev->bio);
                                 bio->bi_bdev = NULL;
                                 bio->bi_next = NULL;
-                                ios->per_dev[i].bio = bio;
+                                per_dev->length = master_dev->length;
+                                per_dev->bio = bio;
+                                per_dev->dev = dev;
                         } else {
-                                bio = ios->bio;
+                                bio = master_dev->bio;
+                                /* FIXME: bio_set_dir() */
+                                bio->bi_rw |= (1 << BIO_RW);
                         }
 
-                        osd_req_write(or, &ios->obj, ios->offset, bio,
-                                      ios->length);
-                        /* EXOFS_DBGMSG("write sync=%d\n", sync);*/
+                        osd_req_write(or, &ios->obj, per_dev->offset, bio,
+                                      per_dev->length);
+                        EXOFS_DBGMSG("write(0x%llx) offset=0x%llx "
+                                     "length=0x%llx dev=%d\n",
+                                     _LLU(ios->obj.id), _LLU(per_dev->offset),
+                                     _LLU(per_dev->length), dev);
                 } else if (ios->kern_buff) {
-                        osd_req_write_kern(or, &ios->obj, ios->offset,
+                        ret = osd_req_write_kern(or, &ios->obj, per_dev->offset,
                                            ios->kern_buff, ios->length);
-                        /* EXOFS_DBGMSG("write_kern sync=%d\n", sync);*/
+                        if (unlikely(ret))
+                                goto out;
+                        EXOFS_DBGMSG2("write_kern(0x%llx) offset=0x%llx "
+                                      "length=0x%llx dev=%d\n",
+                                      _LLU(ios->obj.id), _LLU(per_dev->offset),
+                                      _LLU(ios->length), dev);
                 } else {
                         osd_req_set_attributes(or, &ios->obj);
-                        /* EXOFS_DBGMSG("set_attributes sync=%d\n", sync);*/
+                        EXOFS_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n",
+                                      _LLU(ios->obj.id), ios->out_attr_len, dev);
                 }
 
                 if (ios->out_attr)
@@ -308,54 +630,93 @@ int exofs_sbi_write(struct exofs_io_state *ios)
                         osd_req_add_get_attr_list(or, ios->in_attr,
                                                   ios->in_attr_len);
         }
-        ret = exofs_io_execute(ios);
 
 out:
         return ret;
 }
 
-int exofs_sbi_read(struct exofs_io_state *ios)
+int exofs_sbi_write(struct exofs_io_state *ios)
 {
-        int i, ret;
+        int i;
+        int ret;
 
-        for (i = 0; i < 1; i++) {
-                struct osd_request *or;
-                unsigned first_dev = (unsigned)ios->obj.id;
+        ret = _prepare_for_striping(ios);
+        if (unlikely(ret))
+                return ret;
 
-                first_dev %= ios->sbi->s_numdevs;
-                or = osd_start_request(ios->sbi->s_ods[first_dev], GFP_KERNEL);
-                if (unlikely(!or)) {
-                        EXOFS_ERR("%s: osd_start_request failed\n", __func__);
-                        ret = -ENOMEM;
-                        goto out;
-                }
-                ios->per_dev[i].or = or;
-                ios->numdevs++;
+        for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
+                ret = _sbi_write_mirror(ios, i);
+                if (unlikely(ret))
+                        return ret;
+        }
 
-                if (ios->bio) {
-                        osd_req_read(or, &ios->obj, ios->offset, ios->bio,
-                                     ios->length);
-                        /* EXOFS_DBGMSG("read sync=%d\n", sync);*/
-                } else if (ios->kern_buff) {
-                        osd_req_read_kern(or, &ios->obj, ios->offset,
-                                          ios->kern_buff, ios->length);
-                        /* EXOFS_DBGMSG("read_kern sync=%d\n", sync);*/
-                } else {
-                        osd_req_get_attributes(or, &ios->obj);
-                        /* EXOFS_DBGMSG("get_attributes sync=%d\n", sync);*/
-                }
+        ret = exofs_io_execute(ios);
+        return ret;
+}
 
-                if (ios->out_attr)
-                        osd_req_add_set_attr_list(or, ios->out_attr,
-                                                  ios->out_attr_len);
+static int _sbi_read_mirror(struct exofs_io_state *ios, unsigned cur_comp)
+{
+        struct osd_request *or;
+        struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+        unsigned first_dev = (unsigned)ios->obj.id;
 
-                if (ios->in_attr)
-                        osd_req_add_get_attr_list(or, ios->in_attr,
-                                                  ios->in_attr_len);
+        if (ios->pages && !per_dev->length)
+                return 0; /* Just an empty slot */
+
+        first_dev = per_dev->dev + first_dev % ios->layout->mirrors_p1;
+        or = osd_start_request(exofs_ios_od(ios, first_dev), GFP_KERNEL);
+        if (unlikely(!or)) {
+                EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+                return -ENOMEM;
         }
-        ret = exofs_io_execute(ios);
+        per_dev->or = or;
+
+        if (ios->pages) {
+                osd_req_read(or, &ios->obj, per_dev->offset,
+                             per_dev->bio, per_dev->length);
+                EXOFS_DBGMSG("read(0x%llx) offset=0x%llx length=0x%llx"
+                             " dev=%d\n", _LLU(ios->obj.id),
+                             _LLU(per_dev->offset), _LLU(per_dev->length),
+                             first_dev);
+        } else if (ios->kern_buff) {
+                int ret = osd_req_read_kern(or, &ios->obj, per_dev->offset,
+                                            ios->kern_buff, ios->length);
+                EXOFS_DBGMSG2("read_kern(0x%llx) offset=0x%llx "
+                              "length=0x%llx dev=%d ret=>%d\n",
+                              _LLU(ios->obj.id), _LLU(per_dev->offset),
+                              _LLU(ios->length), first_dev, ret);
+                if (unlikely(ret))
+                        return ret;
+        } else {
+                osd_req_get_attributes(or, &ios->obj);
+                EXOFS_DBGMSG2("obj(0x%llx) get_attributes=%d dev=%d\n",
+                              _LLU(ios->obj.id), ios->in_attr_len, first_dev);
+        }
+        if (ios->out_attr)
+                osd_req_add_set_attr_list(or, ios->out_attr, ios->out_attr_len);
 
-out:
+        if (ios->in_attr)
+                osd_req_add_get_attr_list(or, ios->in_attr, ios->in_attr_len);
+
+        return 0;
+}
+
+int exofs_sbi_read(struct exofs_io_state *ios)
+{
+        int i;
+        int ret;
+
+        ret = _prepare_for_striping(ios);
+        if (unlikely(ret))
+                return ret;
+
+        for (i = 0; i < ios->numdevs; i += ios->layout->mirrors_p1) {
+                ret = _sbi_read_mirror(ios, i);
+                if (unlikely(ret))
+                        return ret;
+        }
+
+        ret = exofs_io_execute(ios);
         return ret;
 }
 
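In the rewritten read and write paths above, each stripe component owns mirrors_p1 consecutive per-device slots, so exofs_sbi_read()/exofs_sbi_write() step through ios->numdevs with that stride and the _sbi_*_mirror() helpers fill the mirror slots in between. A toy stand-alone sketch of that slot walk, with counts invented for the example:

#include <stdio.h>

/* Toy illustration of the "i += mirrors_p1" stride: every component owns
 * a block of mirrors_p1 consecutive slots handled by the mirror helper. */
int main(void)
{
        unsigned numdevs = 6;     /* slots prepared by striping (made up) */
        unsigned mirrors_p1 = 2;  /* one data copy + one mirror (made up) */
        unsigned comp, m;

        for (comp = 0; comp < numdevs; comp += mirrors_p1) {
                printf("component slot %u covers mirrors:", comp);
                for (m = comp; m < comp + mirrors_p1; m++)
                        printf(" %u", m);
                printf("\n");
        }
        return 0;
}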
@@ -380,42 +741,82 @@ int extract_attr_from_ios(struct exofs_io_state *ios, struct osd_attr *attr)
         return -EIO;
 }
 
+static int _truncate_mirrors(struct exofs_io_state *ios, unsigned cur_comp,
+                             struct osd_attr *attr)
+{
+        int last_comp = cur_comp + ios->layout->mirrors_p1;
+
+        for (; cur_comp < last_comp; ++cur_comp) {
+                struct exofs_per_dev_state *per_dev = &ios->per_dev[cur_comp];
+                struct osd_request *or;
+
+                or = osd_start_request(exofs_ios_od(ios, cur_comp), GFP_KERNEL);
+                if (unlikely(!or)) {
+                        EXOFS_ERR("%s: osd_start_request failed\n", __func__);
+                        return -ENOMEM;
+                }
+                per_dev->or = or;
+
+                osd_req_set_attributes(or, &ios->obj);
+                osd_req_add_set_attr_list(or, attr, 1);
+        }
+
+        return 0;
+}
+
 int exofs_oi_truncate(struct exofs_i_info *oi, u64 size)
 {
         struct exofs_sb_info *sbi = oi->vfs_inode.i_sb->s_fs_info;
         struct exofs_io_state *ios;
-        struct osd_attr attr;
-        __be64 newsize;
+        struct exofs_trunc_attr {
+                struct osd_attr attr;
+                __be64 newsize;
+        } *size_attrs;
+        struct _striping_info si;
         int i, ret;
 
-        if (exofs_get_io_state(sbi, &ios))
-                return -ENOMEM;
+        ret = exofs_get_io_state(&sbi->layout, &ios);
+        if (unlikely(ret))
+                return ret;
+
+        size_attrs = kcalloc(ios->layout->group_width, sizeof(*size_attrs),
+                             GFP_KERNEL);
+        if (unlikely(!size_attrs)) {
+                ret = -ENOMEM;
+                goto out;
+        }
 
         ios->obj.id = exofs_oi_objno(oi);
         ios->cred = oi->i_cred;
 
-        newsize = cpu_to_be64(size);
-        attr = g_attr_logical_length;
-        attr.val_ptr = &newsize;
+        ios->numdevs = ios->layout->s_numdevs;
+        _calc_stripe_info(ios, size, &si);
 
-        for (i = 0; i < sbi->s_numdevs; i++) {
-                struct osd_request *or;
+        for (i = 0; i < ios->layout->group_width; ++i) {
+                struct exofs_trunc_attr *size_attr = &size_attrs[i];
+                u64 obj_size;
 
-                or = osd_start_request(sbi->s_ods[i], GFP_KERNEL);
-                if (unlikely(!or)) {
-                        EXOFS_ERR("%s: osd_start_request failed\n", __func__);
-                        ret = -ENOMEM;
-                        goto out;
-                }
-                ios->per_dev[i].or = or;
-                ios->numdevs++;
+                if (i < si.dev)
+                        obj_size = si.obj_offset +
+                                   ios->layout->stripe_unit - si.unit_off;
+                else if (i == si.dev)
+                        obj_size = si.obj_offset;
+                else /* i > si.dev */
+                        obj_size = si.obj_offset - si.unit_off;
 
-                osd_req_set_attributes(or, &ios->obj);
-                osd_req_add_set_attr_list(or, &attr, 1);
+                size_attr->newsize = cpu_to_be64(obj_size);
+                size_attr->attr = g_attr_logical_length;
+                size_attr->attr.val_ptr = &size_attr->newsize;
+
+                ret = _truncate_mirrors(ios, i * ios->layout->mirrors_p1,
+                                        &size_attr->attr);
+                if (unlikely(ret))
+                        goto out;
         }
         ret = exofs_io_execute(ios);
 
 out:
+        kfree(size_attrs);
         exofs_put_io_state(ios);
         return ret;
 }
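The truncate path in the final hunk turns one logical file size into a per-component object length: components before the one holding the truncate point keep a full extra stripe_unit, the component at the point is cut at si.obj_offset, and later components are cut just below it. A hedged stand-alone sketch of that per-component calculation, with made-up layout values and without the kernel structures:

#include <stdio.h>

/* Sketch of the per-component sizes exofs_oi_truncate() derives from one
 * striping-info result; the numbers below are invented for illustration. */
int main(void)
{
        unsigned long long stripe_unit = 64 * 1024;
        unsigned group_width = 4;

        /* Pretend the striping math said the truncate point falls on
         * component 'dev' at object offset 'obj_offset', 'unit_off' bytes
         * into its stripe unit. */
        unsigned dev = 2;
        unsigned long long obj_offset = 128 * 1024;
        unsigned long long unit_off = 16 * 1024;
        unsigned i;

        for (i = 0; i < group_width; i++) {
                unsigned long long obj_size;

                if (i < dev)
                        obj_size = obj_offset + stripe_unit - unit_off;
                else if (i == dev)
                        obj_size = obj_offset;
                else    /* i > dev */
                        obj_size = obj_offset - unit_off;

                printf("component %u truncated to 0x%llx bytes\n", i, obj_size);
        }
        return 0;
}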