Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-exception-store.c	102
1 file changed, 68 insertions, 34 deletions
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index 769ab677f8e0..fe6cef8df203 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -105,6 +105,11 @@ struct pstore {
 	void *area;
 
 	/*
+	 * An area of zeros used to clear the next area.
+	 */
+	void *zero_area;
+
+	/*
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
@@ -149,6 +154,13 @@ static int alloc_area(struct pstore *ps)
 	if (!ps->area)
 		return r;
 
+	ps->zero_area = vmalloc(len);
+	if (!ps->zero_area) {
+		vfree(ps->area);
+		return r;
+	}
+	memset(ps->zero_area, 0, len);
+
 	return 0;
 }
 
@@ -156,6 +168,8 @@ static void free_area(struct pstore *ps)
 {
 	vfree(ps->area);
 	ps->area = NULL;
+	vfree(ps->zero_area);
+	ps->zero_area = NULL;
 }
 
 struct mdata_req {
@@ -220,25 +234,41 @@ static chunk_t area_location(struct pstore *ps, chunk_t area)
  * Read or write a metadata area.  Remembering to skip the first
  * chunk which holds the header.
  */
-static int area_io(struct pstore *ps, chunk_t area, int rw)
+static int area_io(struct pstore *ps, int rw)
 {
 	int r;
 	chunk_t chunk;
 
-	chunk = area_location(ps, area);
+	chunk = area_location(ps, ps->current_area);
 
 	r = chunk_io(ps, chunk, rw, 0);
 	if (r)
 		return r;
 
-	ps->current_area = area;
 	return 0;
 }
 
-static int zero_area(struct pstore *ps, chunk_t area)
+static void zero_memory_area(struct pstore *ps)
 {
 	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
-	return area_io(ps, area, WRITE);
+}
+
+static int zero_disk_area(struct pstore *ps, chunk_t area)
+{
+	struct dm_io_region where = {
+		.bdev = ps->snap->cow->bdev,
+		.sector = ps->snap->chunk_size * area_location(ps, area),
+		.count = ps->snap->chunk_size,
+	};
+	struct dm_io_request io_req = {
+		.bi_rw = WRITE,
+		.mem.type = DM_IO_VMA,
+		.mem.ptr.vma = ps->zero_area,
+		.client = ps->io_client,
+		.notify.fn = NULL,
+	};
+
+	return dm_io(&io_req, 1, &where, NULL);
 }
 
 static int read_header(struct pstore *ps, int *new_snapshot)
@@ -411,15 +441,14 @@ static int insert_exceptions(struct pstore *ps, int *full)
 
 static int read_exceptions(struct pstore *ps)
 {
-	chunk_t area;
 	int r, full = 1;
 
 	/*
 	 * Keeping reading chunks and inserting exceptions until
 	 * we find a partially full area.
 	 */
-	for (area = 0; full; area++) {
-		r = area_io(ps, area, READ);
+	for (ps->current_area = 0; full; ps->current_area++) {
+		r = area_io(ps, READ);
 		if (r)
 			return r;
 
@@ -428,6 +457,8 @@ static int read_exceptions(struct pstore *ps)
 			return r;
 	}
 
+	ps->current_area--;
+
 	return 0;
 }
 
@@ -486,12 +517,13 @@ static int persistent_read_metadata(struct exception_store *store)
 			return r;
 		}
 
-		r = zero_area(ps, 0);
+		ps->current_area = 0;
+		zero_memory_area(ps);
+		r = zero_disk_area(ps, 0);
 		if (r) {
-			DMWARN("zero_area(0) failed");
+			DMWARN("zero_disk_area(0) failed");
 			return r;
 		}
-
 	} else {
 		/*
 		 * Sanity checks.
@@ -551,7 +583,6 @@ static void persistent_commit(struct exception_store *store,
 			      void (*callback) (void *, int success),
 			      void *callback_context)
 {
-	int r;
 	unsigned int i;
 	struct pstore *ps = get_info(store);
 	struct disk_exception de;
@@ -572,33 +603,36 @@ static void persistent_commit(struct exception_store *store,
 	cb->context = callback_context;
 
 	/*
-	 * If there are no more exceptions in flight, or we have
-	 * filled this metadata area we commit the exceptions to
-	 * disk.
+	 * If there are exceptions in flight and we have not yet
+	 * filled this metadata area there's nothing more to do.
 	 */
-	if (atomic_dec_and_test(&ps->pending_count) ||
-	    (ps->current_committed == ps->exceptions_per_area)) {
-		r = area_io(ps, ps->current_area, WRITE);
-		if (r)
-			ps->valid = 0;
+	if (!atomic_dec_and_test(&ps->pending_count) &&
+	    (ps->current_committed != ps->exceptions_per_area))
+		return;
 
-		/*
-		 * Have we completely filled the current area ?
-		 */
-		if (ps->current_committed == ps->exceptions_per_area) {
-			ps->current_committed = 0;
-			r = zero_area(ps, ps->current_area + 1);
-			if (r)
-				ps->valid = 0;
-		}
+	/*
+	 * Commit exceptions to disk.
+	 */
+	if (area_io(ps, WRITE))
+		ps->valid = 0;
 
-		for (i = 0; i < ps->callback_count; i++) {
-			cb = ps->callbacks + i;
-			cb->callback(cb->context, r == 0 ? 1 : 0);
-		}
+	/*
+	 * Advance to the next area if this one is full.
+	 */
+	if (ps->current_committed == ps->exceptions_per_area) {
+		if (zero_disk_area(ps, ps->current_area + 1))
+			ps->valid = 0;
+		ps->current_committed = 0;
+		ps->current_area++;
+		zero_memory_area(ps);
+	}
 
-		ps->callback_count = 0;
+	for (i = 0; i < ps->callback_count; i++) {
+		cb = ps->callbacks + i;
+		cb->callback(cb->context, ps->valid);
 	}
+
+	ps->callback_count = 0;
 }
 
 static void persistent_drop(struct exception_store *store)