author	Mikulas Patocka <mpatocka@redhat.com>	2009-09-04 15:40:39 -0400
committer	Alasdair G Kergon <agk@redhat.com>	2009-09-04 15:40:39 -0400
commit	61578dcd3fafe6babd72e8db32110cc0b630a432
tree	8e5bb9e66ec43666dd5079cc9203fc56c233b469
parent	02d2fd31defce6ff77146ad0fef4f19006055d86
dm snapshot: fix header corruption race on invalidation
If a persistent snapshot fills up, a race can corrupt the on-disk header, which causes a crash on any future attempt to activate the snapshot (typically while booting). This patch fixes the race.

When the snapshot overflows, __invalidate_snapshot is called, which calls the snapshot store method drop_snapshot. It goes to persistent_drop_snapshot, which calls write_header. write_header constructs the new header in the "area" location.

Concurrently, an existing kcopyd job may finish, call copy_callback and the commit_exception method, which goes to persistent_commit_exception. persistent_commit_exception doesn't do locking, relying on the fact that callbacks are single-threaded, but it can race with snapshot invalidation and overwrite the header that is just being written while the snapshot is being invalidated.

The result of this race is a corrupted header being written, which can lead to a crash on further reactivation (if chunk_size is zero in the corrupted header).

The fix is to use separate memory areas for each.

See the bug: https://bugzilla.redhat.com/show_bug.cgi?id=461506

Cc: stable@kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
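As background for the diff below, here is a minimal user-space sketch of the pattern the patch moves to: one scratch buffer per purpose, so the header write and the metadata-commit path never share memory, with allocation failures unwound through gotos in the same style as the reworked alloc_area. The names (struct store, alloc_buffers) are illustrative only, not the dm-snapshot code.

#include <stdlib.h>

/* Simplified stand-in for struct pstore: one scratch buffer per purpose. */
struct store {
	size_t chunk_bytes;
	void *area;        /* metadata scratch, used by the commit path */
	void *zero_area;   /* pre-zeroed chunk used when clearing areas */
	void *header_area; /* header scratch, never touched by the commit path */
};

/* Allocate the three buffers; on failure, free whatever was already allocated. */
static int alloc_buffers(struct store *s)
{
	s->area = malloc(s->chunk_bytes);
	if (!s->area)
		goto err_area;

	s->zero_area = calloc(1, s->chunk_bytes);
	if (!s->zero_area)
		goto err_zero_area;

	s->header_area = malloc(s->chunk_bytes);
	if (!s->header_area)
		goto err_header_area;

	return 0;

err_header_area:
	free(s->zero_area);
err_zero_area:
	free(s->area);
err_area:
	return -1;
}

Because the header now has its own buffer, a writer building the header can no longer be clobbered by a concurrent metadata commit that reuses the shared scratch area.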
-rw-r--r--	drivers/md/dm-snap-persistent.c	44
1 file changed, 34 insertions, 10 deletions
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index 2a3d626a98d9..5d1a97580cb7 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -106,6 +106,13 @@ struct pstore {
 	void *zero_area;
 
 	/*
+	 * An area used for header. The header can be written
+	 * concurrently with metadata (when invalidating the snapshot),
+	 * so it needs a separate buffer.
+	 */
+	void *header_area;
+
+	/*
 	 * Used to keep track of which metadata area the data in
 	 * 'chunk' refers to.
 	 */
@@ -148,16 +155,27 @@ static int alloc_area(struct pstore *ps)
 	 */
 	ps->area = vmalloc(len);
 	if (!ps->area)
-		return r;
+		goto err_area;
 
 	ps->zero_area = vmalloc(len);
-	if (!ps->zero_area) {
-		vfree(ps->area);
-		return r;
-	}
+	if (!ps->zero_area)
+		goto err_zero_area;
 	memset(ps->zero_area, 0, len);
 
+	ps->header_area = vmalloc(len);
+	if (!ps->header_area)
+		goto err_header_area;
+
 	return 0;
+
+err_header_area:
+	vfree(ps->zero_area);
+
+err_zero_area:
+	vfree(ps->area);
+
+err_area:
+	return r;
 }
 
 static void free_area(struct pstore *ps)
@@ -169,6 +187,10 @@ static void free_area(struct pstore *ps)
 	if (ps->zero_area)
 		vfree(ps->zero_area);
 	ps->zero_area = NULL;
+
+	if (ps->header_area)
+		vfree(ps->header_area);
+	ps->header_area = NULL;
 }
 
 struct mdata_req {
@@ -285,11 +307,11 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	if (r)
 		return r;
 
-	r = chunk_io(ps, ps->area, 0, READ, 1);
+	r = chunk_io(ps, ps->header_area, 0, READ, 1);
 	if (r)
 		goto bad;
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 
 	if (le32_to_cpu(dh->magic) == 0) {
 		*new_snapshot = 1;
@@ -339,15 +361,15 @@ static int write_header(struct pstore *ps)
 {
 	struct disk_header *dh;
 
-	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
+	memset(ps->header_area, 0, ps->store->chunk_size << SECTOR_SHIFT);
 
-	dh = (struct disk_header *) ps->area;
+	dh = ps->header_area;
 	dh->magic = cpu_to_le32(SNAP_MAGIC);
 	dh->valid = cpu_to_le32(ps->valid);
 	dh->version = cpu_to_le32(ps->version);
 	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);
 
-	return chunk_io(ps, ps->area, 0, WRITE, 1);
+	return chunk_io(ps, ps->header_area, 0, WRITE, 1);
 }
 
 /*
@@ -667,6 +689,8 @@ static int persistent_ctr(struct dm_exception_store *store,
 	ps->valid = 1;
 	ps->version = SNAPSHOT_DISK_VERSION;
 	ps->area = NULL;
+	ps->zero_area = NULL;
+	ps->header_area = NULL;
 	ps->next_free = 2;	/* skipping the header and first area */
 	ps->current_committed = 0;
 