author	Milan Broz <mbroz@redhat.com>	2007-07-12 12:28:00 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-12 18:01:08 -0400
commit	fcac03abd325e4f7a4cc8fe05fea2793b1c8eb75 (patch)
tree	79965ed6b2b03468e629a63fca1c14f795091436 /drivers/md
parent	596f138eede0c113aa655937c8be85fc15ccd61c (diff)
dm snapshot: fix invalidation deadlock
Process persistent exception store metadata IOs in a separate thread.

A snapshot may become invalid while inside generic_make_request(). A
synchronous write is then needed to update the metadata while still
inside that function. Since the introduction of
md-dm-reduce-stack-usage-with-stacked-block-devices.patch this has to be
performed by a separate thread to avoid deadlock.

Signed-off-by: Milan Broz <mbroz@redhat.com>
Signed-off-by: Alasdair G Kergon <agk@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
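For readers unfamiliar with the pattern the change relies on, below is a minimal sketch of the same idea in isolation: package the request in a structure that embeds a work_struct, queue it on a dedicated workqueue, and flush that workqueue so the caller blocks until the worker thread has done the I/O on its behalf. The names my_req, my_wq, do_sync_io and issue_from_worker are illustrative only and do not appear in this patch; the real code uses struct mdata_req, ps->metadata_wq, do_metadata() and chunk_io().

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_req {
	struct work_struct work;	/* embedded so the worker can recover the request */
	int result;			/* filled in by the worker thread */
};

/* assumed to be created elsewhere, e.g. with create_singlethread_workqueue() */
static struct workqueue_struct *my_wq;

static void do_sync_io(struct work_struct *work)
{
	struct my_req *req = container_of(work, struct my_req, work);

	/* the real patch issues dm_io() here; any blocking operation fits */
	req->result = 0;
}

static int issue_from_worker(void)
{
	struct my_req req;

	INIT_WORK(&req.work, do_sync_io);
	queue_work(my_wq, &req.work);
	flush_workqueue(my_wq);		/* returns only after do_sync_io() has run */

	return req.result;
}

Because flush_workqueue() does not return until the queued item has completed, req can safely live on the caller's stack, which is what the patch does with struct mdata_req in chunk_io().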
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/dm-exception-store.c | 48
1 file changed, 43 insertions(+), 5 deletions(-)
diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c
index cb05b744deaa..8c25c2ff724a 100644
--- a/drivers/md/dm-exception-store.c
+++ b/drivers/md/dm-exception-store.c
@@ -125,6 +125,8 @@ struct pstore {
 	uint32_t callback_count;
 	struct commit_callback *callbacks;
 	struct dm_io_client *io_client;
+
+	struct workqueue_struct *metadata_wq;
 };
 
 static unsigned sectors_to_pages(unsigned sectors)
@@ -156,10 +158,24 @@ static void free_area(struct pstore *ps)
 	ps->area = NULL;
 }
 
+struct mdata_req {
+	struct io_region *where;
+	struct dm_io_request *io_req;
+	struct work_struct work;
+	int result;
+};
+
+static void do_metadata(struct work_struct *work)
+{
+	struct mdata_req *req = container_of(work, struct mdata_req, work);
+
+	req->result = dm_io(req->io_req, 1, req->where, NULL);
+}
+
 /*
  * Read or write a chunk aligned and sized block of data from a device.
  */
-static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
+static int chunk_io(struct pstore *ps, uint32_t chunk, int rw, int metadata)
 {
 	struct io_region where = {
 		.bdev = ps->snap->cow->bdev,
@@ -173,8 +189,23 @@ static int chunk_io(struct pstore *ps, uint32_t chunk, int rw)
 		.client = ps->io_client,
 		.notify.fn = NULL,
 	};
+	struct mdata_req req;
+
+	if (!metadata)
+		return dm_io(&io_req, 1, &where, NULL);
 
-	return dm_io(&io_req, 1, &where, NULL);
+	req.where = &where;
+	req.io_req = &io_req;
+
+	/*
+	 * Issue the synchronous I/O from a different thread
+	 * to avoid generic_make_request recursion.
+	 */
+	INIT_WORK(&req.work, do_metadata);
+	queue_work(ps->metadata_wq, &req.work);
+	flush_workqueue(ps->metadata_wq);
+
+	return req.result;
 }
 
 /*
@@ -189,7 +220,7 @@ static int area_io(struct pstore *ps, uint32_t area, int rw)
 	/* convert a metadata area index to a chunk index */
 	chunk = 1 + ((ps->exceptions_per_area + 1) * area);
 
-	r = chunk_io(ps, chunk, rw);
+	r = chunk_io(ps, chunk, rw, 0);
 	if (r)
 		return r;
 
@@ -230,7 +261,7 @@ static int read_header(struct pstore *ps, int *new_snapshot)
 	if (r)
 		return r;
 
-	r = chunk_io(ps, 0, READ);
+	r = chunk_io(ps, 0, READ, 1);
 	if (r)
 		goto bad;
 
@@ -292,7 +323,7 @@ static int write_header(struct pstore *ps)
 	dh->version = cpu_to_le32(ps->version);
 	dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);
 
-	return chunk_io(ps, 0, WRITE);
+	return chunk_io(ps, 0, WRITE, 1);
 }
 
 /*
@@ -409,6 +440,7 @@ static void persistent_destroy(struct exception_store *store)
 {
 	struct pstore *ps = get_info(store);
 
+	destroy_workqueue(ps->metadata_wq);
 	dm_io_client_destroy(ps->io_client);
 	vfree(ps->callbacks);
 	free_area(ps);
@@ -588,6 +620,12 @@ int dm_create_persistent(struct exception_store *store)
 	atomic_set(&ps->pending_count, 0);
 	ps->callbacks = NULL;
 
+	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
+	if (!ps->metadata_wq) {
+		DMERR("couldn't start header metadata update thread");
+		return -ENOMEM;
+	}
+
 	store->destroy = persistent_destroy;
 	store->read_metadata = persistent_read_metadata;
 	store->prepare_exception = persistent_prepare;